diff --git a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
index 5740faa746a3161965f525753b893918c3fd4d2e..79171524283eae2473b732e8a99edd5085b70dc0 100644
--- a/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
+++ b/paddle/fluid/inference/analysis/analyzer_lac_tester.cc
@@ -192,7 +192,7 @@ void TestLACPrediction(const std::string &model_path,
         sum += timer.toc();
       }
     }
-    PrintTime(sum, batch_size, repeat);
+    PrintTime(batch_size, repeat, 1, 0, sum / batch_size);
     return;
   }
   timer.tic();
diff --git a/paddle/fluid/inference/api/helper.h b/paddle/fluid/inference/api/helper.h
index 2c2ac656e8005369bb0e9033236a431cb09caa15..0ab2542f34e68c77b20d75b1c0840e957d7b0ccd 100644
--- a/paddle/fluid/inference/api/helper.h
+++ b/paddle/fluid/inference/api/helper.h
@@ -124,9 +124,11 @@ std::string DescribeTensor(const PaddleTensor &tensor) {
 
 void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                double latency) {
+  LOG(INFO) << "=====================================";
   LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat
             << ", threads: " << num_threads << ", thread id: " << tid
             << ", latency: " << latency << "ms";
+  LOG(INFO) << "=====================================";
 }
 
 }  // namespace inference