From e66b4c6bff74231898cbbb013627b0eb86eced0f Mon Sep 17 00:00:00 2001
From: luotao1
Date: Thu, 22 Nov 2018 18:49:59 +0800
Subject: [PATCH] adjust tester_helper to make multi-instance multi-thread work

test=develop
---
 paddle/fluid/inference/tests/api/tester_helper.h | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 72703bc80b..d21567ac19 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -207,11 +207,7 @@ void TestMultiThreadPrediction(
   int batch_size = FLAGS_batch_size;
   int num_times = FLAGS_repeat;
   std::vector<std::thread> threads;
-  std::vector<std::unique_ptr<PaddlePredictor>> predictors;
-  predictors.emplace_back(CreateTestPredictor(config, use_analysis));
-  for (int tid = 1; tid < num_threads; ++tid) {
-    predictors.emplace_back(predictors.front()->Clone());
-  }
+  auto main_predictor = CreateTestPredictor(config, use_analysis);
 
   size_t total_time{0};
   for (int tid = 0; tid < num_threads; ++tid) {
@@ -219,7 +215,9 @@ void TestMultiThreadPrediction(
       // Each thread should have local inputs and outputs.
       // The inputs of each thread are all the same.
       std::vector<PaddleTensor> outputs_tid;
-      auto &predictor = predictors[tid];
+      // To ensure the thread binding correctly,
+      // please clone inside the threadpool.
+      auto predictor = main_predictor->Clone();
 #ifdef PADDLE_WITH_MKLDNN
       if (use_analysis) {
         static_cast<AnalysisPredictor *>(predictor.get())
--
GitLab
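
For context, the pattern this patch switches to can be sketched outside of Paddle as follows. This is a minimal illustration, not Paddle code: Predictor, Run(), and RunMultiThread() are hypothetical stand-ins, with only the Clone() contract mirroring PaddlePredictor::Clone(). The point is that each worker clones the shared predictor inside its own thread body instead of receiving an instance pre-cloned by the launching thread, so per-thread setup (such as the MKLDNN thread-id binding in the hunk above) applies to the worker thread itself.

#include <memory>
#include <thread>
#include <vector>

// Hypothetical stand-in for the predictor interface; only the Clone()
// contract matters for this illustration.
struct Predictor {
  virtual ~Predictor() = default;
  virtual std::unique_ptr<Predictor> Clone() = 0;
  virtual void Run() = 0;
};

// Keep a single main predictor and have each worker clone it inside its own
// thread body rather than in the thread that spawns the workers.
void RunMultiThread(Predictor *main_predictor, int num_threads) {
  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([main_predictor]() {
      // Clone inside the thread, not before it is spawned.
      auto predictor = main_predictor->Clone();
      // Any per-thread configuration would go here, then run on the copy.
      predictor->Run();
    });
  }
  for (auto &t : threads) t.join();
}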