Commit 01e54a1f, authored by: J jack

use google style

Parent: afb8620c
@@ -13,18 +13,18 @@
 // limitations under the License.
 #include <glog/logging.h>
+#include <omp.h>
 #include <algorithm>
-#include <chrono>
+#include <chrono>  // NOLINT
 #include <fstream>
 #include <iostream>
 #include <string>
 #include <vector>
 #include <utility>
-#include <omp.h>
 #include "include/paddlex/paddlex.h"
-using namespace std::chrono;
+using namespace std::chrono;  // NOLINT
 DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
@@ -34,7 +34,9 @@ DEFINE_string(key, "", "key of encryption");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_int32(batch_size, 1, "Batch size of inferring");
-DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
+DEFINE_int32(thread_num,
+             omp_get_num_procs(),
+             "Number of preprocessing threads");
 int main(int argc, char** argv) {
   // Parsing command-line
@@ -51,7 +53,12 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key,
+             FLAGS_batch_size);
   // Run prediction
   double total_running_time_s = 0.0;
@@ -70,32 +77,38 @@ int main(int argc, char** argv) {
       image_paths.push_back(image_path);
     }
     imgs = image_paths.size();
-    for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
+    for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
       auto start = system_clock::now();
       // Read images
-      int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
+      int im_vec_size =
+          std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
       std::vector<cv::Mat> im_vec(im_vec_size - i);
-      std::vector<PaddleX::ClsResult> results(im_vec_size - i, PaddleX::ClsResult());
+      std::vector<PaddleX::ClsResult> results(im_vec_size - i,
+                                              PaddleX::ClsResult());
       int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
       #pragma omp parallel for num_threads(thread_num)
-      for(int j = i; j < im_vec_size; ++j){
+      for (int j = i; j < im_vec_size; ++j) {
        im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
      }
      auto imread_end = system_clock::now();
-      model.predict(im_vec, results, thread_num);
+      model.predict(im_vec, &results, thread_num);
      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
+      total_imread_time_s += static_cast<double>(imread_duration.count()) *
+                             microseconds::period::num /
+                             microseconds::period::den;
      auto end = system_clock::now();
      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
-      for(int j = i; j < im_vec_size; ++j) {
+      total_running_time_s += static_cast<double>(duration.count()) *
+                              microseconds::period::num /
+                              microseconds::period::den;
+      for (int j = i; j < im_vec_size; ++j) {
        std::cout << "Path:" << image_paths[j]
                  << ", predict label: " << results[j - i].category
                  << ", label_id:" << results[j - i].category_id
                  << ", score: " << results[j - i].score << std::endl;
      }
    }
  } else {
    auto start = system_clock::now();
@@ -104,21 +117,17 @@ int main(int argc, char** argv) {
    model.predict(im, &result);
    auto end = system_clock::now();
    auto duration = duration_cast<microseconds>(end - start);
-    total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
+    total_running_time_s += static_cast<double>(duration.count()) *
+                            microseconds::period::num /
+                            microseconds::period::den;
    std::cout << "Predict label: " << result.category
              << ", label_id:" << result.category_id
              << ", score: " << result.score << std::endl;
  }
-  std::cout << "Total running time: "
-            << total_running_time_s
-            << " s, average running time: "
-            << total_running_time_s / imgs
-            << " s/img, total read img time: "
-            << total_imread_time_s
-            << " s, average read time: "
-            << total_imread_time_s / imgs
-            << " s/img, batch_size = "
-            << FLAGS_batch_size
-            << std::endl;
+  std::cout << "Total running time: " << total_running_time_s
+            << " s, average running time: " << total_running_time_s / imgs
+            << " s/img, total read img time: " << total_imread_time_s
+            << " s, average read time: " << total_imread_time_s / imgs
+            << " s/img, batch_size = " << FLAGS_batch_size << std::endl;
  return 0;
 }
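The repeated `period::num / period::den` arithmetic above is how these binaries turn a `std::chrono::microseconds` tick count into seconds. A minimal standalone sketch of the same conversion, separate from this commit:

#include <chrono>
#include <iostream>

int main() {
  using namespace std::chrono;  // NOLINT
  auto start = system_clock::now();
  // ... the work being timed goes here ...
  auto end = system_clock::now();
  auto us = duration_cast<microseconds>(end - start);
  // microseconds::period is std::ratio<1, 1000000>, so multiplying by
  // num and dividing by den rescales the tick count to seconds.
  double seconds = static_cast<double>(us.count()) *
                   microseconds::period::num / microseconds::period::den;
  std::cout << "elapsed: " << seconds << " s" << std::endl;
  return 0;
}

An equivalent and arguably clearer form is `duration<double>(end - start).count()`, which lets the library do the rescaling.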
@@ -13,20 +13,20 @@
 // limitations under the License.
 #include <glog/logging.h>
+#include <omp.h>
 #include <algorithm>
-#include <chrono>
+#include <chrono>  // NOLINT
 #include <fstream>
 #include <iostream>
 #include <string>
 #include <vector>
 #include <utility>
-#include <omp.h>
 #include "include/paddlex/paddlex.h"
 #include "include/paddlex/visualize.h"
-using namespace std::chrono;
+using namespace std::chrono;  // NOLINT
 DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
@@ -37,13 +37,17 @@ DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
 DEFINE_int32(batch_size, 1, "Batch size of inferring");
-DEFINE_double(threshold, 0.5, "The minimum scores of target boxes which are shown");
+DEFINE_double(threshold,
+              0.5,
+              "The minimum scores of target boxes which are shown");
-DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
+DEFINE_int32(thread_num,
+             omp_get_num_procs(),
+             "Number of preprocessing threads");
 int main(int argc, char** argv) {
   // Parse command-line arguments
   google::ParseCommandLineFlags(&argc, &argv, true);
   if (FLAGS_model_dir == "") {
    std::cerr << "--model_dir needs to be defined" << std::endl;
    return -1;
@@ -55,7 +59,12 @@ int main(int argc, char** argv) {
  std::cout << "Thread num: " << FLAGS_thread_num << std::endl;
  // Load the model
  PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key,
+             FLAGS_batch_size);
  double total_running_time_s = 0.0;
  double total_imread_time_s = 0.0;
@@ -75,41 +84,47 @@ int main(int argc, char** argv) {
      image_paths.push_back(image_path);
    }
    imgs = image_paths.size();
-    for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
+    for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
      auto start = system_clock::now();
-      int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
+      int im_vec_size =
+          std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
      std::vector<cv::Mat> im_vec(im_vec_size - i);
-      std::vector<PaddleX::DetResult> results(im_vec_size - i, PaddleX::DetResult());
+      std::vector<PaddleX::DetResult> results(im_vec_size - i,
+                                              PaddleX::DetResult());
      int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
      #pragma omp parallel for num_threads(thread_num)
-      for(int j = i; j < im_vec_size; ++j){
+      for (int j = i; j < im_vec_size; ++j) {
        im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
      }
      auto imread_end = system_clock::now();
-      model.predict(im_vec, results, thread_num);
+      model.predict(im_vec, &results, thread_num);
      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
+      total_imread_time_s += static_cast<double>(imread_duration.count()) *
+                             microseconds::period::num /
+                             microseconds::period::den;
      auto end = system_clock::now();
      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
+      total_running_time_s += static_cast<double>(duration.count()) *
+                              microseconds::period::num /
+                              microseconds::period::den;
      // Print the detected boxes
-      for(int j = 0; j < im_vec_size - i; ++j) {
-        for(int k = 0; k < results[j].boxes.size(); ++k) {
-          std::cout << "image file: " << image_paths[i + j] << ", ";// << std::endl;
+      for (int j = 0; j < im_vec_size - i; ++j) {
+        for (int k = 0; k < results[j].boxes.size(); ++k) {
+          std::cout << "image file: " << image_paths[i + j] << ", ";
          std::cout << "predict label: " << results[j].boxes[k].category
                    << ", label_id:" << results[j].boxes[k].category_id
-                    << ", score: " << results[j].boxes[k].score << ", box(xmin, ymin, w, h):("
+                    << ", score: " << results[j].boxes[k].score
+                    << ", box(xmin, ymin, w, h):("
                    << results[j].boxes[k].coordinate[0] << ", "
                    << results[j].boxes[k].coordinate[1] << ", "
                    << results[j].boxes[k].coordinate[2] << ", "
                    << results[j].boxes[k].coordinate[3] << ")" << std::endl;
        }
      }
      // Visualize
-      for(int j = 0; j < im_vec_size - i; ++j) {
-        cv::Mat vis_img =
-            PaddleX::Visualize(im_vec[j], results[j], model.labels, colormap, FLAGS_threshold);
+      for (int j = 0; j < im_vec_size - i; ++j) {
+        cv::Mat vis_img = PaddleX::Visualize(
+            im_vec[j], results[j], model.labels, colormap, FLAGS_threshold);
        std::string save_path =
            PaddleX::generate_save_path(FLAGS_save_dir, image_paths[i + j]);
        cv::imwrite(save_path, vis_img);
@@ -121,12 +136,12 @@ int main(int argc, char** argv) {
    cv::Mat im = cv::imread(FLAGS_image, 1);
    model.predict(im, &result);
    for (int i = 0; i < result.boxes.size(); ++i) {
      std::cout << "image file: " << FLAGS_image << std::endl;
      std::cout << ", predict label: " << result.boxes[i].category
                << ", label_id:" << result.boxes[i].category_id
-                << ", score: " << result.boxes[i].score << ", box(xmin, ymin, w, h):("
-                << result.boxes[i].coordinate[0] << ", "
-                << result.boxes[i].coordinate[1] << ", "
+                << ", score: " << result.boxes[i].score
+                << ", box(xmin, ymin, w, h):(" << result.boxes[i].coordinate[0]
+                << ", " << result.boxes[i].coordinate[1] << ", "
                << result.boxes[i].coordinate[2] << ", "
                << result.boxes[i].coordinate[3] << ")" << std::endl;
    }
@@ -140,18 +155,12 @@ int main(int argc, char** argv) {
    result.clear();
    std::cout << "Visualized output saved as " << save_path << std::endl;
  }
-  std::cout << "Total running time: "
-            << total_running_time_s
-            << " s, average running time: "
-            << total_running_time_s / imgs
-            << " s/img, total read img time: "
-            << total_imread_time_s
-            << " s, average read img time: "
-            << total_imread_time_s / imgs
-            << " s, batch_size = "
-            << FLAGS_batch_size
-            << std::endl;
+  std::cout << "Total running time: " << total_running_time_s
+            << " s, average running time: " << total_running_time_s / imgs
+            << " s/img, total read img time: " << total_imread_time_s
+            << " s, average read img time: " << total_imread_time_s / imgs
+            << " s, batch_size = " << FLAGS_batch_size << std::endl;
  return 0;
 }
@@ -13,19 +13,19 @@
 // limitations under the License.
 #include <glog/logging.h>
+#include <omp.h>
 #include <algorithm>
-#include <chrono>
+#include <chrono>  // NOLINT
 #include <fstream>
 #include <iostream>
 #include <string>
 #include <vector>
 #include <utility>
-#include <omp.h>
 #include "include/paddlex/paddlex.h"
 #include "include/paddlex/visualize.h"
-using namespace std::chrono;
+using namespace std::chrono;  // NOLINT
 DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
@@ -36,7 +36,9 @@ DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
 DEFINE_int32(batch_size, 1, "Batch size of inferring");
-DEFINE_int32(thread_num, omp_get_num_procs(), "Number of preprocessing threads");
+DEFINE_int32(thread_num,
+             omp_get_num_procs(),
+             "Number of preprocessing threads");
 int main(int argc, char** argv) {
   // Parse command-line arguments
@@ -53,7 +55,12 @@ int main(int argc, char** argv) {
   // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key, FLAGS_batch_size);
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key,
+             FLAGS_batch_size);
  double total_running_time_s = 0.0;
  double total_imread_time_s = 0.0;
@@ -72,25 +79,31 @@ int main(int argc, char** argv) {
      image_paths.push_back(image_path);
    }
    imgs = image_paths.size();
-    for(int i = 0; i < image_paths.size(); i += FLAGS_batch_size){
+    for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
      auto start = system_clock::now();
-      int im_vec_size = std::min((int)image_paths.size(), i + FLAGS_batch_size);
+      int im_vec_size =
+          std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
      std::vector<cv::Mat> im_vec(im_vec_size - i);
-      std::vector<PaddleX::SegResult> results(im_vec_size - i, PaddleX::SegResult());
+      std::vector<PaddleX::SegResult> results(im_vec_size - i,
+                                              PaddleX::SegResult());
      int thread_num = std::min(FLAGS_thread_num, im_vec_size - i);
      #pragma omp parallel for num_threads(thread_num)
-      for(int j = i; j < im_vec_size; ++j){
+      for (int j = i; j < im_vec_size; ++j) {
        im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
      }
      auto imread_end = system_clock::now();
-      model.predict(im_vec, results, thread_num);
+      model.predict(im_vec, &results, thread_num);
      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += double(imread_duration.count()) * microseconds::period::num / microseconds::period::den;
+      total_imread_time_s += static_cast<double>(imread_duration.count()) *
+                             microseconds::period::num /
+                             microseconds::period::den;
      auto end = system_clock::now();
      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
+      total_running_time_s += static_cast<double>(duration.count()) *
+                              microseconds::period::num /
+                              microseconds::period::den;
      // Visualize
-      for(int j = 0; j < im_vec_size - i; ++j) {
+      for (int j = 0; j < im_vec_size - i; ++j) {
        cv::Mat vis_img =
            PaddleX::Visualize(im_vec[j], results[j], model.labels, colormap);
        std::string save_path =
@@ -106,7 +119,9 @@ int main(int argc, char** argv) {
    model.predict(im, &result);
    auto end = system_clock::now();
    auto duration = duration_cast<microseconds>(end - start);
-    total_running_time_s += double(duration.count()) * microseconds::period::num / microseconds::period::den;
+    total_running_time_s += static_cast<double>(duration.count()) *
+                            microseconds::period::num /
+                            microseconds::period::den;
    // Visualize
    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
    std::string save_path =
@@ -115,17 +130,11 @@ int main(int argc, char** argv) {
    result.clear();
    std::cout << "Visualized output saved as " << save_path << std::endl;
  }
-  std::cout << "Total running time: "
-            << total_running_time_s
-            << " s, average running time: "
-            << total_running_time_s / imgs
-            << " s/img, total read img time: "
-            << total_imread_time_s
-            << " s, average read img time: "
-            << total_imread_time_s / imgs
-            << " s, batch_size = "
-            << FLAGS_batch_size
-            << std::endl;
+  std::cout << "Total running time: " << total_running_time_s
+            << " s, average running time: " << total_running_time_s / imgs
+            << " s/img, total read img time: " << total_imread_time_s
+            << " s, average read img time: " << total_imread_time_s / imgs
+            << " s, batch_size = " << FLAGS_batch_size << std::endl;
  return 0;
 }
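All three binaries share the same batched-read idiom: pre-size a vector of `cv::Mat`, then let an OpenMP team decode one image per thread. A distilled sketch of that pattern under the same OpenCV/OpenMP assumptions as the code above (the function name is illustrative):

#include <omp.h>
#include <algorithm>
#include <string>
#include <vector>
#include <opencv2/opencv.hpp>

std::vector<cv::Mat> ReadBatch(const std::vector<std::string>& paths,
                               int max_threads) {
  // Pre-sizing the vector gives every thread its own slot, so the
  // parallel loop needs no locking.
  std::vector<cv::Mat> batch(paths.size());
  int thread_num =
      std::min(max_threads, static_cast<int>(paths.size()));
  #pragma omp parallel for num_threads(thread_num)
  for (int j = 0; j < static_cast<int>(paths.size()); ++j) {
    batch[j] = cv::imread(paths[j], 1);  // 1 = IMREAD_COLOR (3-channel BGR)
  }
  return batch;
}

Incidentally, the `std::move` wrapped around `cv::imread`'s return value in the diffs above is redundant: the call already yields a temporary that moves into the vector slot.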
@@ -54,4 +54,4 @@ class ConfigPaser {
  YAML::Node Transforms_;
 };
-}  // namespace PaddleDetection
+}  // namespace PaddleX
@@ -16,8 +16,11 @@
 #include <functional>
 #include <iostream>
+#include <map>
+#include <memory>
 #include <numeric>
+#include <string>
+#include <vector>
 #include "yaml-cpp/yaml.h"
 #ifdef _WIN32
@@ -28,21 +31,21 @@
 #include "paddle_inference_api.h"  // NOLINT
-#include "config_parser.h"
-#include "results.h"
-#include "transforms.h"
+#include "config_parser.h"  // NOLINT
+#include "results.h"  // NOLINT
+#include "transforms.h"  // NOLINT
 #ifdef WITH_ENCRYPTION
-#include "paddle_model_decrypt.h"
-#include "model_code.h"
+#include "paddle_model_decrypt.h"  // NOLINT
+#include "model_code.h"  // NOLINT
 #endif
 namespace PaddleX {
 /*
  * @brief
  * This class encapsulates all necessary process steps of model inference,
  * which include image matrix preprocessing, model predicting and results
  * postprocessing. The entire process of model inference can be simplified
  * as below:
  * 1. preprocess image matrix (resize, padding, ......)
  * 2. model infer
@@ -63,11 +66,11 @@ class Model {
  /*
   * @brief
   * This method aims to initialize the model configuration
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: use gpu or not when inferring
   * @param use_trt: use TensorRT or not when inferring
   * @param gpu_id: the id of gpu when inferring with gpu
   * @param key: the key of encryption when using encrypted model
   * @param batch_size: batch size of inference
   * */
@@ -76,7 +79,7 @@ class Model {
            bool use_trt = false,
            int gpu_id = 0,
            std::string key = "",
            int batch_size = 1) {
    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
  }
@@ -85,11 +88,11 @@ class Model {
                        bool use_trt = false,
                        int gpu_id = 0,
                        std::string key = "",
                        int batch_size = 1);
  /*
   * @brief
   * This method aims to load model configurations which include
   * transform steps and label list
   *
   * @param model_dir: the directory which contains model.yml
@@ -107,7 +110,7 @@ class Model {
   * @return true if preprocess image matrix successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
  /*
   * @brief
   * This method aims to transform multiple image matrices; the result will be
@@ -115,15 +118,17 @@ class Model {
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: raw data of a batch of image matrices after transform
   * @param thread_num: the number of preprocessing threads,
   *                    each thread runs preprocess on a single image matrix
   * @return true if preprocess a batch of image matrices successfully
   * */
-  bool preprocess(const std::vector<cv::Mat> &input_im_batch, std::vector<ImageBlob> &blob_batch, int thread_num = 1);
+  bool preprocess(const std::vector<cv::Mat> &input_im_batch,
+                  std::vector<ImageBlob> *blob_batch,
+                  int thread_num = 1);
  /*
   * @brief
   * This method aims to execute classification model prediction on a single
   * image matrix; the result will be returned at the second parameter.
   *
   * @param im: single image matrix to be predicted
@@ -134,7 +139,7 @@ class Model {
  /*
   * @brief
   * This method aims to execute classification model prediction on a batch of
   * image matrices; the result will be returned at the second parameter.
   *
   * @param im: a batch of image matrices to be predicted
@@ -143,7 +148,9 @@ class Model {
   *                    on a single image matrix
   * @return true if predict successfully
   * */
-  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<ClsResult> &results, int thread_num = 1);
+  bool predict(const std::vector<cv::Mat> &im_batch,
+               std::vector<ClsResult> *results,
+               int thread_num = 1);
  /*
   * @brief
@@ -167,11 +174,13 @@ class Model {
   *                    on a single image matrix
   * @return true if predict successfully
   * */
-  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<DetResult> &result, int thread_num = 1);
+  bool predict(const std::vector<cv::Mat> &im_batch,
+               std::vector<DetResult> *result,
+               int thread_num = 1);
  /*
   * @brief
   * This method aims to execute segmentation model prediction on a single
   * image matrix; the result will be returned at the second parameter.
   *
   * @param im: single image matrix to be predicted
@@ -182,7 +191,7 @@ class Model {
  /*
   * @brief
   * This method aims to execute segmentation model prediction on a batch of
   * image matrices; the result will be returned at the second parameter.
   *
   * @param im: a batch of image matrices to be predicted
@@ -191,8 +200,10 @@ class Model {
   *                    on a single image matrix
   * @return true if predict successfully
   * */
-  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<SegResult> &result, int thread_num = 1);
+  bool predict(const std::vector<cv::Mat> &im_batch,
+               std::vector<SegResult> *result,
+               int thread_num = 1);
  // model type, includes 3 types: classifier, detector, segmenter
  std::string type;
  // model name, such as FasterRCNN, YOLOv3 and so on
@@ -209,4 +220,4 @@ class Model {
  // a predictor which runs the model prediction
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
 };
-}  // namespce of PaddleX
+}  // namespace PaddleX
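The `predict` and `preprocess` signature changes in this header apply the Google C++ style rule that output parameters are passed by pointer rather than by non-const reference, which is why the callers above now write `&results`. A tiny self-contained illustration of the rule, unrelated to the PaddleX API:

#include <iostream>
#include <vector>

// Inputs by const reference; outputs by pointer, placed last.
bool Square(const std::vector<int>& in, std::vector<int>* out) {
  if (out == nullptr) return false;
  out->clear();
  for (int v : in) out->push_back(v * v);  // fill the out-param
  return true;
}

int main() {
  std::vector<int> in = {1, 2, 3};
  std::vector<int> out;
  Square(in, &out);  // the & marks `out` as mutated at the call site
  std::cout << out.back() << std::endl;  // prints 9
  return 0;
}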
@@ -66,7 +66,7 @@ class Transform {
  * This method executes a preprocessing operation on an image matrix;
  * the result will be returned at the second parameter.
  * @param im: single image matrix to be preprocessed
  * @param data: the raw data of the single image matrix after preprocessing
  * @return true if transform successfully
  * */
 virtual bool Run(cv::Mat* im, ImageBlob* data) = 0;
@@ -92,10 +92,10 @@ class Normalize : public Transform {
 /*
  * @brief
  * This class executes the resize-by-short operation on an image matrix. At
  * first, it resizes the short side of the image matrix to the specified
  * length; the long side is resized in the same proportion. If the new length
  * of the long side exceeds the max size, the long side is resized to the max
  * size and the short side is resized in the same proportion.
  * */
 class ResizeByShort : public Transform {
@@ -214,6 +214,7 @@ class Padding : public Transform {
    }
  }
  virtual bool Run(cv::Mat* im, ImageBlob* data);
+
 private:
  int coarsest_stride_ = -1;
  int width_ = 0;
@@ -229,6 +230,7 @@ class Transforms {
  void Init(const YAML::Node& node, bool to_rgb = true);
  std::shared_ptr<Transform> CreateTransform(const std::string& name);
  bool Run(cv::Mat* im, ImageBlob* data);
+
 private:
  std::vector<std::shared_ptr<Transform>> transforms_;
  bool to_rgb_ = true;
@@ -47,7 +47,7 @@ namespace PaddleX {
 * @brief
 * Generate visualization colormap for each class
 *
 * @param num_class: number of classes
 * @return color map, the size of vector is 3 * num_class
 * */
 std::vector<int> GenerateColorMap(int num_class);
@@ -94,4 +94,4 @@ cv::Mat Visualize(const cv::Mat& img,
 * */
 std::string generate_save_path(const std::string& save_dir,
                                const std::string& file_path);
-}  // namespce of PaddleX
+}  // namespace PaddleX
(Diff for one file collapsed.)
@@ -145,4 +145,4 @@ std::string generate_save_path(const std::string& save_dir,
  std::string image_name(file_path.substr(pos + 1));
  return save_dir + OS_PATH_SEP + image_name;
 }
-}  // namespace of PaddleX
+}  // namespace PaddleX