From b4fa3dbda379684b24124c24fef27be93aa9b412 Mon Sep 17 00:00:00 2001
From: luotao1
Date: Wed, 5 Sep 2018 17:46:55 +0800
Subject: [PATCH] unify PrintTime of analysis unit-test

---
 .../inference/analysis/analyzer_lac_tester.cc | 10 ++-------
 .../inference/analysis/analyzer_ner_tester.cc |  6 +----
 .../analysis/test_text_classification.cc      | 22 ++++---------------
 3 files changed, 7 insertions(+), 31 deletions(-)

diff --git a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
index e2f7253ac04..3bb5d9462f5 100644
--- a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
@@ -114,12 +114,6 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
   input_slots->assign({input_tensor});
 }
-static void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
 void BenchAllData(const std::string &model_path, const std::string &data_file,
                   const int batch_size, const int repeat) {
   NativeConfig config;
@@ -145,7 +139,7 @@ void BenchAllData(const std::string &model_path, const std::string &data_file,
       sum += timer.toc();
     }
   }
-  PrintTime(sum, batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, sum / repeat);
 }
 const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
@@ -176,7 +170,7 @@ void TestLACPrediction(const std::string &model_path,
   for (int i = 0; i < repeat; i++) {
     predictor->Run(input_slots, &outputs_slots);
   }
-  PrintTime(timer.toc(), batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, timer.toc() / repeat);
   EXPECT_EQ(outputs_slots.size(), 1UL);
   auto &out = outputs_slots[0];
   size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
diff --git a/paddle/fluid/inference/analysis/analyzer_ner_tester.cc b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
index 720a8811db7..9c8fcf84fee 100644
--- a/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_ner_tester.cc
@@ -130,11 +130,7 @@ void TestChineseNERPrediction() {
   for (int i = 0; i < FLAGS_repeat; i++) {
     predictor->Run(input_slots, &outputs);
   }
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << FLAGS_batch_size
-            << ", repeat: " << FLAGS_repeat
-            << ", latency: " << timer.toc() / FLAGS_repeat << "ms";
-  LOG(INFO) << "=====================================";
+  PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, timer.toc() / FLAGS_repeat);

   PADDLE_ENFORCE(outputs.size(), 1UL);
   auto &out = outputs[0];
diff --git a/paddle/fluid/inference/analysis/test_text_classification.cc b/paddle/fluid/inference/analysis/test_text_classification.cc
index 2913824f623..191b41e9883 100644
--- a/paddle/fluid/inference/analysis/test_text_classification.cc
+++ b/paddle/fluid/inference/analysis/test_text_classification.cc
@@ -18,8 +18,8 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/inference/analysis/analyzer.h"
 #include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "paddle/fluid/inference/api/timer.h"

 DEFINE_string(infer_model, "", "Directory of the inference model.");
 DEFINE_string(infer_data, "", "Path of the dataset.");
@@ -27,22 +27,7 @@ DEFINE_int32(batch_size, 1, "batch size.");
 DEFINE_int32(repeat, 1, "How many times to repeat run.");

 namespace paddle {
-
-template <typename T>
-std::string to_string(const std::vector<T> &vec) {
-  std::stringstream ss;
-  for (const auto &c : vec) {
-    ss << c << " ";
-  }
-  return ss.str();
-}
-
-void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
+namespace inference {

 void Main(int batch_size) {
   // Three sequence inputs.
@@ -78,7 +63,7 @@ void Main(int batch_size) {
     CHECK(predictor->Run(input_slots, &output_slots));
     sum += timer.toc();
   }
-  PrintTime(sum, batch_size, FLAGS_repeat);
+  PrintTime(batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);

   // Get output
   LOG(INFO) << "get outputs " << output_slots.size();
@@ -99,6 +84,7 @@ void Main(int batch_size) {

 TEST(text_classification, basic) { Main(FLAGS_batch_size); }

+}  // namespace inference
 }  // namespace paddle

 USE_PASS(fc_fuse_pass);
-- 
GitLab
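
Note: the shared helper that replaces the per-test copies is declared in paddle/fluid/inference/api/helper.h, which this patch does not show. From the call sites above, e.g. PrintTime(batch_size, repeat, 1, 0, sum / repeat), the helper takes the batch size, repeat count, thread count, thread id, and an average latency in milliseconds. The sketch below is only an illustration of that presumed interface; the parameter names and log layout are assumptions inferred from the removed local implementations, not the actual helper.cc code.

```cpp
// Minimal sketch of the unified profiling helper (assumed interface).
// The real implementation lives in paddle/fluid/inference/api/helper.h.
#include <glog/logging.h>

namespace paddle {
namespace inference {

// Logs one profile line per call site; `latency` is already averaged over
// `repeat` by the callers in this patch (e.g. sum / repeat).
static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency) {
  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", avg latency: " << latency << "ms ======";
}

}  // namespace inference
}  // namespace paddle
```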