From 398b7e468f1e0cf4b47cca5a061b2baeb82d2a9e Mon Sep 17 00:00:00 2001
From: jack <136876878@qq.com>
Date: Mon, 13 Jul 2020 19:24:12 +0800
Subject: [PATCH] remove time record

---
 deploy/cpp/demo/classifier.cpp | 26 --------------------------
 deploy/cpp/demo/detector.cpp   | 27 ---------------------------
 deploy/cpp/demo/segmenter.cpp  | 27 ---------------------------
 3 files changed, 80 deletions(-)

diff --git a/deploy/cpp/demo/classifier.cpp b/deploy/cpp/demo/classifier.cpp
index 6fd354d..db36874 100644
--- a/deploy/cpp/demo/classifier.cpp
+++ b/deploy/cpp/demo/classifier.cpp
@@ -62,8 +62,6 @@ int main(int argc, char** argv) {
              FLAGS_use_ir_optim);
 
   // 进行预测
-  double total_running_time_s = 0.0;
-  double total_imread_time_s = 0.0;
   int imgs = 1;
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
@@ -79,7 +77,6 @@ int main(int argc, char** argv) {
     }
     imgs = image_paths.size();
     for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
-      auto start = system_clock::now();
       // 读图像
       int im_vec_size =
           std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
@@ -91,19 +88,7 @@ int main(int argc, char** argv) {
       for (int j = i; j < im_vec_size; ++j) {
         im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
       }
-      auto imread_end = system_clock::now();
       model.predict(im_vec, &results, thread_num);
-
-      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += static_cast<double>(imread_duration.count()) *
-                             microseconds::period::num /
-                             microseconds::period::den;
-
-      auto end = system_clock::now();
-      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += static_cast<double>(duration.count()) *
-                              microseconds::period::num /
-                              microseconds::period::den;
       for (int j = i; j < im_vec_size; ++j) {
         std::cout << "Path:" << image_paths[j]
                   << ", predict label: " << results[j - i].category
@@ -112,23 +97,12 @@ int main(int argc, char** argv) {
       }
     }
   } else {
-    auto start = system_clock::now();
     PaddleX::ClsResult result;
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
-    auto end = system_clock::now();
-    auto duration = duration_cast<microseconds>(end - start);
-    total_running_time_s += static_cast<double>(duration.count()) *
-                            microseconds::period::num /
-                            microseconds::period::den;
     std::cout << "Predict label: " << result.category
               << ", label_id:" << result.category_id
               << ", score: " << result.score << std::endl;
   }
-  std::cout << "Total running time: " << total_running_time_s
-            << " s, average running time: " << total_running_time_s / imgs
-            << " s/img, total read img time: " << total_imread_time_s
-            << " s, average read time: " << total_imread_time_s / imgs
-            << " s/img, batch_size = " << FLAGS_batch_size << std::endl;
   return 0;
 }
diff --git a/deploy/cpp/demo/detector.cpp b/deploy/cpp/demo/detector.cpp
index dc89396..32fbaaf 100644
--- a/deploy/cpp/demo/detector.cpp
+++ b/deploy/cpp/demo/detector.cpp
@@ -65,11 +65,7 @@ int main(int argc, char** argv) {
              FLAGS_gpu_id,
              FLAGS_key,
              FLAGS_use_ir_optim);
-
-  double total_running_time_s = 0.0;
-  double total_imread_time_s = 0.0;
   int imgs = 1;
-  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   std::string save_dir = "output";
   // 进行预测
   if (FLAGS_image_list != "") {
@@ -85,7 +81,6 @@ int main(int argc, char** argv) {
     }
     imgs = image_paths.size();
     for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
-      auto start = system_clock::now();
       int im_vec_size =
           std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
       std::vector<cv::Mat> im_vec(im_vec_size - i);
@@ -96,17 +91,7 @@ int main(int argc, char** argv) {
       for (int j = i; j < im_vec_size; ++j) {
         im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
       }
-      auto imread_end = system_clock::now();
       model.predict(im_vec, &results, thread_num);
-      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += static_cast<double>(imread_duration.count()) *
-                             microseconds::period::num /
-                             microseconds::period::den;
-      auto end = system_clock::now();
-      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += static_cast<double>(duration.count()) *
-                              microseconds::period::num /
-                              microseconds::period::den;
       // 输出结果目标框
       for (int j = 0; j < im_vec_size - i; ++j) {
         for (int k = 0; k < results[j].boxes.size(); ++k) {
@@ -132,15 +117,9 @@ int main(int argc, char** argv) {
       }
     }
   } else {
-    auto start = system_clock::now();
     PaddleX::DetResult result;
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
-    auto end = system_clock::now();
-    auto duration = duration_cast<microseconds>(end - start);
-    total_running_time_s += static_cast<double>(duration.count()) *
-                            microseconds::period::num /
-                            microseconds::period::den;
     // 输出结果目标框
     for (int i = 0; i < result.boxes.size(); ++i) {
       std::cout << "image file: " << FLAGS_image << std::endl;
@@ -163,11 +142,5 @@ int main(int argc, char** argv) {
     std::cout << "Visualized output saved as " << save_path << std::endl;
   }
 
-  std::cout << "Total running time: " << total_running_time_s
-            << " s, average running time: " << total_running_time_s / imgs
-            << " s/img, total read img time: " << total_imread_time_s
-            << " s, average read img time: " << total_imread_time_s / imgs
-            << " s, batch_size = " << FLAGS_batch_size << std::endl;
-
   return 0;
 }
diff --git a/deploy/cpp/demo/segmenter.cpp b/deploy/cpp/demo/segmenter.cpp
index 4b3905a..b3b8fad 100644
--- a/deploy/cpp/demo/segmenter.cpp
+++ b/deploy/cpp/demo/segmenter.cpp
@@ -62,11 +62,7 @@ int main(int argc, char** argv) {
              FLAGS_gpu_id,
              FLAGS_key,
              FLAGS_use_ir_optim);
-
-  double total_running_time_s = 0.0;
-  double total_imread_time_s = 0.0;
   int imgs = 1;
-  auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   // 进行预测
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
@@ -81,7 +77,6 @@ int main(int argc, char** argv) {
     }
     imgs = image_paths.size();
     for (int i = 0; i < image_paths.size(); i += FLAGS_batch_size) {
-      auto start = system_clock::now();
       int im_vec_size =
           std::min(static_cast<int>(image_paths.size()), i + FLAGS_batch_size);
       std::vector<cv::Mat> im_vec(im_vec_size - i);
@@ -92,17 +87,7 @@ int main(int argc, char** argv) {
       for (int j = i; j < im_vec_size; ++j) {
         im_vec[j - i] = std::move(cv::imread(image_paths[j], 1));
       }
-      auto imread_end = system_clock::now();
       model.predict(im_vec, &results, thread_num);
-      auto imread_duration = duration_cast<microseconds>(imread_end - start);
-      total_imread_time_s += static_cast<double>(imread_duration.count()) *
-                             microseconds::period::num /
-                             microseconds::period::den;
-      auto end = system_clock::now();
-      auto duration = duration_cast<microseconds>(end - start);
-      total_running_time_s += static_cast<double>(duration.count()) *
-                              microseconds::period::num /
-                              microseconds::period::den;
       // 可视化
       for (int j = 0; j < im_vec_size - i; ++j) {
         cv::Mat vis_img =
@@ -114,15 +99,9 @@ int main(int argc, char** argv) {
      }
     }
   } else {
-    auto start = system_clock::now();
     PaddleX::SegResult result;
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
-    auto end = system_clock::now();
-    auto duration = duration_cast<microseconds>(end - start);
-    total_running_time_s += static_cast<double>(duration.count()) *
-                            microseconds::period::num /
-                            microseconds::period::den;
     // 可视化
     cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels);
     std::string save_path =
@@ -131,11 +110,5 @@ int main(int argc, char** argv) {
     result.clear();
     std::cout << "Visualized output saved as " << save_path << std::endl;
   }
-  std::cout << "Total running time: " << total_running_time_s
-            << " s, average running time: " << total_running_time_s / imgs
-            << " s/img, total read img time: " << total_imread_time_s
-            << " s, average read img time: " << total_imread_time_s / imgs
-            << " s, batch_size = " << FLAGS_batch_size << std::endl;
-
   return 0;
 }
-- 
GitLab