Unverified commit 4048a268, authored by Tao Luo, committed by GitHub

Merge pull request #16687 from luotao1/reduce_inference_ci_time

reduce all analyzer_test CI elapsed time
@@ -55,6 +55,9 @@ DEFINE_bool(record_benchmark, false,
 DEFINE_double(accuracy, 1e-3, "Result Accuracy.");
 DEFINE_double(quantized_accuracy, 1e-2, "Result Quantized Accuracy.");
 DEFINE_bool(zero_copy, false, "Use ZeroCopy to speedup Feed/Fetch.");
+DEFINE_bool(warmup, false,
+            "Use warmup to calculate elapsed_time more accurately. "
+            "To reduce CI time, it sets false in default.");
 DECLARE_bool(profile);
 DECLARE_int32(paddle_num_threads);
@@ -367,7 +370,9 @@ void TestOneThreadPrediction(
     const std::vector<std::vector<PaddleTensor>> &inputs,
     std::vector<std::vector<PaddleTensor>> *outputs, bool use_analysis = true) {
   auto predictor = CreateTestPredictor(config, use_analysis);
-  PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0);
+  if (FLAGS_warmup) {
+    PredictionWarmUp(predictor.get(), inputs, outputs, 1, 0);
+  }
   PredictionRun(predictor.get(), inputs, outputs, 1, 0);
 }
@@ -395,7 +400,10 @@ void TestMultiThreadPrediction(
             ->SetMkldnnThreadID(static_cast<int>(tid) + 1);
       }
 #endif
-      PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads, tid);
+      if (FLAGS_warmup) {
+        PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads,
+                         tid);
+      }
       PredictionRun(predictor.get(), inputs, &outputs_tid, num_threads, tid);
     });
   }
...
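The change is a standard flag-gated warm-up pattern. For readers unfamiliar with it, below is a minimal, self-contained C++ sketch of the same idea: one untimed iteration, run only when --warmup is passed, absorbs one-off costs (lazy initialization, cold caches, first-touch page faults) so they do not distort the measured run. The RunOnce workload and the timing scaffolding are hypothetical stand-ins for Paddle's PredictionWarmUp/PredictionRun helpers, not the actual implementations.

#include <chrono>
#include <cstdio>

#include <gflags/gflags.h>

// Mirrors the flag added by the diff: off by default so CI stays fast,
// opt in when accurate elapsed-time numbers are needed.
DEFINE_bool(warmup, false,
            "Run one untimed warm-up iteration before measuring.");

// Hypothetical workload standing in for a single predictor run.
double RunOnce() {
  double acc = 0.0;
  for (int i = 0; i < 1000000; ++i) acc += i * 0.5;
  return acc;
}

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);

  if (FLAGS_warmup) {
    RunOnce();  // one-off costs are paid here, outside the timed region
  }

  auto start = std::chrono::steady_clock::now();
  double result = RunOnce();  // the measured run
  auto end = std::chrono::steady_clock::now();
  auto us = std::chrono::duration_cast<std::chrono::microseconds>(end - start);
  std::printf("result=%f elapsed=%lld us\n", result,
              static_cast<long long>(us.count()));
  return 0;
}

Run as ./bench --warmup=true to get the more accurate timing; by default the warm-up is skipped, which is exactly the trade-off this PR makes: CI pays for timing accuracy only when someone asks for it.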