Commit b4fa3dbd authored by luotao1

unify PrintTime of analysis unit-test

Parent f615ba2f
@@ -114,12 +114,6 @@ void GetOneBatch(std::vector<PaddleTensor> *input_slots, DataRecord *data,
   PADDLE_ENFORCE_EQ(batch_size, static_cast<int>(one_batch.lod.size() - 1));
   input_slots->assign({input_tensor});
 }
-static void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
 void BenchAllData(const std::string &model_path, const std::string &data_file,
                   const int batch_size, const int repeat) {
   NativeConfig config;
@@ -145,7 +139,7 @@ void BenchAllData(const std::string &model_path, const std::string &data_file,
       sum += timer.toc();
     }
   }
-  PrintTime(sum, batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, sum / repeat);
 }
 const int64_t lac_ref_data[] = {24, 25, 25, 25, 38, 30, 31, 14, 15, 44, 24, 25,
                                 25, 25, 25, 25, 44, 24, 25, 25, 25, 36, 42, 43,
@@ -176,7 +170,7 @@ void TestLACPrediction(const std::string &model_path,
   for (int i = 0; i < repeat; i++) {
     predictor->Run(input_slots, &outputs_slots);
   }
-  PrintTime(timer.toc(), batch_size, repeat);
+  PrintTime(batch_size, repeat, 1, 0, timer.toc() / repeat);
   EXPECT_EQ(outputs_slots.size(), 1UL);
   auto &out = outputs_slots[0];
   size_t size = std::accumulate(out.shape.begin(), out.shape.end(), 1,
...
@@ -130,11 +130,7 @@ void TestChineseNERPrediction() {
   for (int i = 0; i < FLAGS_repeat; i++) {
     predictor->Run(input_slots, &outputs);
   }
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << FLAGS_batch_size
-            << ", repeat: " << FLAGS_repeat
-            << ", latency: " << timer.toc() / FLAGS_repeat << "ms";
-  LOG(INFO) << "=====================================";
+  PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0, timer.toc() / FLAGS_repeat);

   PADDLE_ENFORCE(outputs.size(), 1UL);
   auto &out = outputs[0];
...
@@ -18,8 +18,8 @@
 #include "paddle/fluid/framework/ir/pass.h"
 #include "paddle/fluid/inference/analysis/analyzer.h"
 #include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/api/helper.h"
 #include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "paddle/fluid/inference/api/timer.h"

 DEFINE_string(infer_model, "", "Directory of the inference model.");
 DEFINE_string(infer_data, "", "Path of the dataset.");
@@ -27,22 +27,7 @@ DEFINE_int32(batch_size, 1, "batch size.");
 DEFINE_int32(repeat, 1, "How many times to repeat run.");

 namespace paddle {
-
-template <typename T>
-std::string to_string(const std::vector<T> &vec) {
-  std::stringstream ss;
-  for (const auto &c : vec) {
-    ss << c << " ";
-  }
-  return ss.str();
-}
-
-void PrintTime(const double latency, const int bs, const int repeat) {
-  LOG(INFO) << "===========profile result===========";
-  LOG(INFO) << "batch_size: " << bs << ", repeat: " << repeat
-            << ", avg latency: " << latency / repeat << "ms";
-  LOG(INFO) << "=====================================";
-}
+namespace inference {

 void Main(int batch_size) {
   // Three sequence inputs.
@@ -78,7 +63,7 @@ void Main(int batch_size) {
     CHECK(predictor->Run(input_slots, &output_slots));
     sum += timer.toc();
   }
-  PrintTime(sum, batch_size, FLAGS_repeat);
+  PrintTime(batch_size, FLAGS_repeat, 1, 0, sum / FLAGS_repeat);

   // Get output
   LOG(INFO) << "get outputs " << output_slots.size();
@@ -99,6 +84,7 @@ void Main(int batch_size) {
 TEST(text_classification, basic) { Main(FLAGS_batch_size); }

+}  // namespace inference
 }  // namespace paddle

 USE_PASS(fc_fuse_pass);
...
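All three tests now report timing through a single helper, called as PrintTime(batch_size, repeat, num_threads, tid, latency); the single-threaded analysis tests pass num_threads = 1 and tid = 0. Note the contract change visible at the call sites: the deleted per-test helpers divided the accumulated latency by repeat internally, while the unified helper receives the already-averaged value (sum / repeat or timer.toc() / repeat). The shared implementation in paddle/fluid/inference/api/helper.h is not part of this diff; the following is only a minimal sketch consistent with the new call sites, with a body assumed from the deleted per-test versions.

#include <glog/logging.h>

namespace paddle {
namespace inference {

// Hypothetical reconstruction -- the real helper.h is not shown in this
// commit. Logs one profiling line per (thread, batch) configuration;
// `latency` is the per-repeat average in milliseconds, already divided
// by `repeat` at the call site.
static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency) {
  LOG(INFO) << "batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", avg latency: " << latency << "ms";
}

}  // namespace inference
}  // namespace paddle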