未验证 提交 df98ac61 编写于 作者: Z zhiboniu 提交者: GitHub

update BenchmarkLog and fix some bug (#4383)

上级 97373901
......@@ -79,17 +79,32 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num){
LOG(INFO) << "cpu_math_library_num_threads: " << FLAGS_cpu_threads;
LOG(INFO) << "----------------------- Data info -----------------------";
LOG(INFO) << "batch_size: " << FLAGS_batch_size;
LOG(INFO) << "batch_size_keypoint: " << FLAGS_batch_size_keypoint;
LOG(INFO) << "input_shape: " << "dynamic shape";
LOG(INFO) << "----------------------- Model info -----------------------";
FLAGS_model_dir.erase(FLAGS_model_dir.find_last_not_of("/") + 1);
LOG(INFO) << "model_name: " << FLAGS_model_dir.substr(FLAGS_model_dir.find_last_of('/') + 1);
LOG(INFO) << "model_name: " << FLAGS_model_dir;
LOG(INFO) << "----------------------- Perf info ------------------------";
LOG(INFO) << "Total number of predicted data: " << img_num
<< " and total time spent(ms): "
<< std::accumulate(det_time.begin(), det_time.end(), 0.);
img_num = std::max(1, img_num);
LOG(INFO) << "preproce_time(ms): " << det_time[0] / img_num
<< ", inference_time(ms): " << det_time[1] / img_num
<< ", postprocess_time(ms): " << det_time[2] / img_num;
}
void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){
LOG(INFO) << "----------------------- Data info -----------------------";
LOG(INFO) << "batch_size_keypoint: " << FLAGS_batch_size_keypoint;
LOG(INFO) << "----------------------- Model info -----------------------";
FLAGS_model_dir_keypoint.erase(FLAGS_model_dir_keypoint.find_last_not_of("/") + 1);
LOG(INFO) << "model_name: " << FLAGS_model_dir_keypoint.substr(FLAGS_model_dir_keypoint.find_last_of('/') + 1);
LOG(INFO) << "keypoint_model_name: " << FLAGS_model_dir_keypoint;
LOG(INFO) << "----------------------- Perf info ------------------------";
LOG(INFO) << "Total number of predicted data: " << img_num
<< " and total time spent(ms): "
<< std::accumulate(det_time.begin(), det_time.end(), 0);
<< std::accumulate(det_time.begin(), det_time.end(), 0.);
img_num = std::max(1, img_num);
LOG(INFO) << "Average time cost per person:";
LOG(INFO) << "preproce_time(ms): " << det_time[0] / img_num
<< ", inference_time(ms): " << det_time[1] / img_num
<< ", postprocess_time(ms): " << det_time[2] / img_num;
......@@ -424,7 +439,9 @@ void PredictImage(const std::vector<std::string> all_img_paths,
det_t[2] += det_times[2];
}
PrintBenchmarkLog(det_t, all_img_paths.size());
PrintBenchmarkLog(keypoint_t, kpts_imgs);
if (keypoint) {
PrintKptsBenchmarkLog(keypoint_t, kpts_imgs);
}
}
int main(int argc, char** argv) {
......
......@@ -193,7 +193,7 @@ make ARM_ABI = arm8
5. 准备优化后的模型、预测库文件、测试图像。
```shell
mdkir deploy
mkdir deploy
cp main *runtime_config.json deploy/
cd deploy
mkdir model_det
......
......@@ -39,34 +39,47 @@ void PrintBenchmarkLog(std::vector<double> det_time, int img_num) {
<< std::endl;
std::cout << "batch_size_det: " << RT_Config["batch_size_det"].as<int>()
<< std::endl;
std::cout << "batch_size_keypoint: "
<< RT_Config["batch_size_keypoint"].as<int>() << std::endl;
std::cout << "----------------------- Model info -----------------------"
<< std::endl;
RT_Config["model_dir_det"].as<std::string>().erase(
RT_Config["model_dir_det"].as<std::string>().find_last_not_of("/") + 1);
std::cout
<< "detection model_name: "
<< RT_Config["model_dir_det"].as<std::string>().substr(
RT_Config["model_dir_det"].as<std::string>().find_last_of('/') + 1)
<< RT_Config["model_dir_det"].as<std::string>()
<< std::endl;
std::cout << "----------------------- Perf info ------------------------"
<< std::endl;
std::cout << "Total number of predicted data: " << img_num
<< " and total time spent(ms): "
<< std::accumulate(det_time.begin(), det_time.end(), 0.)
<< std::endl;
img_num = std::max(1, img_num);
std::cout << "preproce_time(ms): " << det_time[0] / img_num
<< ", inference_time(ms): " << det_time[1] / img_num
<< ", postprocess_time(ms): " << det_time[2] / img_num << std::endl;
}
// Prints the keypoint-model benchmark summary (data/model/perf sections) to
// stdout.
// @param det_time  per-stage accumulated times in ms: [0]=preprocess,
//                  [1]=inference, [2]=postprocess (expects size >= 3)
// @param img_num   number of detected persons fed to the keypoint model;
//                  clamped to >= 1 before averaging to avoid division by zero
void PrintKptsBenchmarkLog(std::vector<double> det_time, int img_num){
  std::cout << "----------------------- Data info -----------------------"
            << std::endl;
  std::cout << "batch_size_keypoint: "
            << RT_Config["batch_size_keypoint"].as<int>() << std::endl;
  std::cout << "----------------------- Model info -----------------------"
            << std::endl;
  // NOTE(review): as<std::string>() returns a temporary, so this erase() is a
  // no-op — the trailing '/' is never actually stripped. Kept for parity with
  // the original; to really strip it, erase a named local copy instead.
  RT_Config["model_dir_keypoint"].as<std::string>().erase(
      RT_Config["model_dir_keypoint"].as<std::string>().find_last_not_of("/") +
      1);
  std::cout << "keypoint model_name: "
            << RT_Config["model_dir_keypoint"].as<std::string>() << std::endl;
  std::cout << "----------------------- Perf info ------------------------"
            << std::endl;
  std::cout << "Total number of predicted data: " << img_num
            << " and total time spent(ms): "
            // 0. (double) as the init value — an int 0 would truncate every
            // fractional millisecond in the sum
            << std::accumulate(det_time.begin(), det_time.end(), 0.)
            << std::endl;
  // Guard against division by zero when no person was detected.
  img_num = std::max(1, img_num);
  std::cout << "Average time cost per person:" << std::endl
            << "preproce_time(ms): " << det_time[0] / img_num
            << ", inference_time(ms): " << det_time[1] / img_num
            << ", postprocess_time(ms): " << det_time[2] / img_num << std::endl;
}
......@@ -284,7 +297,7 @@ void PredictImage(const std::vector<std::string> all_img_paths,
}
PrintBenchmarkLog(det_t, all_img_paths.size());
if (keypoint) {
PrintBenchmarkLog(keypoint_t, kpts_imgs);
PrintKptsBenchmarkLog(keypoint_t, kpts_imgs);
PrintTotalIimeLog((det_t[0] + det_t[1] + det_t[2]) / all_img_paths.size(),
(keypoint_t[0] + keypoint_t[1] + keypoint_t[2]) / all_img_paths.size(),
midtimecost / all_img_paths.size());
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册