Commit eb9b9bec authored by Tao Luo

add warm up in TestMultiThreadPrediction

test=develop
Parent f17b05d4
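
This change splits each thread's loop into an untimed warm-up run followed by the timed runs, so one-time startup costs (lazy initialization, memory allocation, cache warm-up) no longer inflate the reported average latency. Below is a minimal standalone sketch of the same warm-up-then-measure pattern, using generic C++ and std::chrono rather than the Paddle Timer/PrintTime helpers; RunOnce() is a hypothetical stand-in for one predictor->Run(...) call.

```cpp
// Standalone sketch of the warm-up-then-measure pattern this commit applies.
// RunOnce() is a hypothetical stand-in for one inference call.
#include <chrono>
#include <cmath>
#include <iostream>

double RunOnce() {
  double acc = 0.0;
  for (int i = 1; i <= 100000; ++i) acc += std::sqrt(static_cast<double>(i));
  return acc;
}

int main() {
  using Clock = std::chrono::steady_clock;

  // Warm-up run, excluded from the reported average so that one-time costs
  // (lazy initialization, allocation, cache warm-up) are not timed.
  double sink = RunOnce();

  const int num_times = 100;
  const auto start = Clock::now();
  for (int i = 0; i < num_times; ++i) sink += RunOnce();
  const std::chrono::duration<double, std::milli> elapsed = Clock::now() - start;

  // Report the average latency over the timed runs only; printing `sink`
  // keeps the compiler from optimizing the work away.
  std::cout << "avg latency: " << elapsed.count() / num_times << " ms"
            << " (sink=" << sink << ")\n";
  return 0;
}
```

Resetting the profiler right after the warm-up, as the diff does with paddle::platform::ResetProfiler() under FLAGS_profile on non-Windows builds, serves the same goal: profiling data collected during the warm-up is discarded before the measured runs begin.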
@@ -222,19 +222,36 @@ void TestMultiThreadPrediction(
       // The inputs of each thread are all the same.
       std::vector<PaddleTensor> outputs_tid;
       auto &predictor = predictors[tid];
-      LOG(INFO) << "running thread " << tid;
-      Timer timer;
-      timer.tic();
-      for (int i = 0; i < num_times; i++) {
-        for (const auto &input : inputs) {
-          ASSERT_TRUE(predictor->Run(input, &outputs_tid));
+
+      // warmup run
+      LOG(INFO) << "Running thread " << tid << ", warm up run...";
+      {
+        Timer warmup_timer;
+        warmup_timer.tic();
+        predictor->Run(inputs[0], outputs, batch_size);
+        PrintTime(batch_size, 1, num_threads, tid, warmup_timer.toc(), 1);
+#if !defined(_WIN32)
+        if (FLAGS_profile) {
+          paddle::platform::ResetProfiler();
         }
+#endif
       }
-      auto time = timer.toc();
-      total_time += time;
-      PrintTime(batch_size, num_times, num_threads, tid, time / num_times,
-                inputs.size());
+
+      LOG(INFO) << "Thread " << tid << " run " << num_times << " times...";
+      {
+        Timer timer;
+        timer.tic();
+        for (int i = 0; i < num_times; i++) {
+          for (const auto &input : inputs) {
+            ASSERT_TRUE(predictor->Run(input, &outputs_tid));
+          }
+        }
+        auto time = timer.toc();
+        total_time += time;
+        PrintTime(batch_size, num_times, num_threads, tid, time / num_times,
+                  inputs.size());
+      }
     });
   }
   for (int i = 0; i < num_threads; ++i) {
...