From 90bc14da24351b63f02438c9e82ee99d7d3f0ce5 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Fri, 21 Sep 2018 20:08:19 +0800
Subject: [PATCH] simple fix on inference tester helper (#13507)

---
 paddle/fluid/inference/api/api_impl.h         |  3 +--
 .../fluid/inference/tests/api/tester_helper.h | 24 ++++++++++---------
 2 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h
index ec801c5885..6ecc32a700 100644
--- a/paddle/fluid/inference/api/api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -20,10 +20,9 @@
 #include <string>
 #include <vector>
 
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
 #include "paddle/fluid/inference/io.h"
 #include "paddle/fluid/platform/init.h"
 #include "paddle/fluid/platform/profiler.h"
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 384a40a3f9..05cd343433 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -74,8 +74,8 @@ void CompareResult(const std::vector<PaddleTensor> &outputs,
   }
 }
 
-std::unique_ptr<PaddlePredictor> GetPrediction(AnalysisConfig config,
-                                               bool use_analysis = true) {
+std::unique_ptr<PaddlePredictor> CreateTestPredictor(
+    const AnalysisConfig &config, bool use_analysis = true) {
   if (use_analysis) {
     return CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kAnalysis>(
         config);
@@ -92,7 +92,7 @@ size_t GetSize(const PaddleTensor &out) {
 
 std::unordered_map<std::string, int> GetFuseStatis(AnalysisConfig config,
                                                    int *num_ops) {
-  auto predictor = GetPrediction(config);
+  auto predictor = CreateTestPredictor(config);
   AnalysisPredictor *analysis_predictor =
       dynamic_cast<AnalysisPredictor *>(predictor.get());
   auto &fuse_statis = analysis_predictor->analysis_argument()
@@ -113,11 +113,12 @@ std::unordered_map<std::string, int> GetFuseStatis(AnalysisConfig config,
 }
 
 void TestOneThreadPrediction(
-    AnalysisConfig config, const std::vector<std::vector<PaddleTensor>> inputs,
+    const AnalysisConfig &config,
+    const std::vector<std::vector<PaddleTensor>> &inputs,
     std::vector<PaddleTensor> *outputs, bool use_analysis = true) {
   int batch_size = FLAGS_batch_size;
   int num_times = FLAGS_repeat;
-  auto predictor = GetPrediction(config, use_analysis);
+  auto predictor = CreateTestPredictor(config, use_analysis);
   Timer timer;
   timer.tic();
   for (int i = 0; i < num_times; i++) {
@@ -130,7 +131,8 @@ void TestOneThreadPrediction(
 }
 
 void TestMultiThreadPrediction(
-    AnalysisConfig config, const std::vector<std::vector<PaddleTensor>> inputs,
+    const AnalysisConfig &config,
+    const std::vector<std::vector<PaddleTensor>> &inputs,
     std::vector<PaddleTensor> *outputs, int num_threads,
     bool use_analysis = true) {
   int batch_size = FLAGS_batch_size;
@@ -140,7 +142,7 @@
   // TODO(yanchunwei): Bug here, the analyzer phase can't be paralleled
   // because AttentionLSTM's hard-coded node id will be damaged.
   for (int tid = 0; tid < num_threads; ++tid) {
-    predictors.emplace_back(GetPrediction(config, use_analysis));
+    predictors.emplace_back(CreateTestPredictor(config, use_analysis));
   }
   for (int tid = 0; tid < num_threads; ++tid) {
     threads.emplace_back([&, tid]() {
@@ -164,8 +166,8 @@
   }
 }
 
-void TestPrediction(AnalysisConfig config,
-                    const std::vector<std::vector<PaddleTensor>> inputs,
+void TestPrediction(const AnalysisConfig &config,
+                    const std::vector<std::vector<PaddleTensor>> &inputs,
                     std::vector<PaddleTensor> *outputs, int num_threads,
                     bool use_analysis = FLAGS_use_analysis) {
   LOG(INFO) << "use_analysis: " << use_analysis;
@@ -178,8 +180,8 @@ void TestPrediction(AnalysisConfig config,
 }
 
 void CompareNativeAndAnalysis(
-    AnalysisConfig config,
-    const std::vector<std::vector<PaddleTensor>> inputs) {
+    const AnalysisConfig &config,
+    const std::vector<std::vector<PaddleTensor>> &inputs) {
   std::vector<PaddleTensor> native_outputs, analysis_outputs;
   TestOneThreadPrediction(config, inputs, &native_outputs, false);
   TestOneThreadPrediction(config, inputs, &analysis_outputs, true);
--
GitLab
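
Note for readers of this patch: the helpers are only renamed and re-qualified, not re-implemented. GetPrediction becomes CreateTestPredictor (it creates a fresh predictor rather than running a prediction), and the config and input batches are now taken by const reference, so each call no longer copies the AnalysisConfig and the potentially large input tensors. Below is a minimal sketch of a caller after this patch; the function name, model path, and placeholder inputs are hypothetical, and it assumes AnalysisConfig in this revision still exposes NativeConfig's model_dir field:

    // Hypothetical caller of the renamed helpers (a sketch, not from the patch).
    #include <vector>

    #include "paddle/fluid/inference/tests/api/tester_helper.h"

    namespace paddle {
    namespace inference {

    void RunHelperSketch() {
      AnalysisConfig config;
      config.model_dir = "/path/to/model";  // hypothetical model directory

      // One batch containing one placeholder tensor; a real test would fill
      // in shape, data, and dtype before running.
      std::vector<std::vector<PaddleTensor>> inputs(1);
      inputs[0].resize(1);

      std::vector<PaddleTensor> outputs;

      // config and inputs bind to const references, so neither is copied.
      TestPrediction(config, inputs, &outputs, /*num_threads=*/1);

      // The factory can also be called directly when a test needs the
      // predictor itself:
      auto predictor = CreateTestPredictor(config, /*use_analysis=*/true);
    }

    }  // namespace inference
    }  // namespace paddle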