From 3123d18787941b9dc08090ad6e82e9b285dea8a1 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Tue, 2 Jul 2019 16:13:11 +0800
Subject: [PATCH] remove unused AnalysisPredictor::SetMkldnnThreadID() (#18444)

test=develop
---
 paddle/fluid/inference/api/analysis_predictor.cc                | 8 --------
 paddle/fluid/inference/api/analysis_predictor.h                 | 2 --
 .../tests/api/analyzer_int8_object_detection_tester.cc          | 2 +-
 paddle/fluid/inference/tests/api/tester_helper.h                | 6 ------
 4 files changed, 1 insertion(+), 17 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 5d9d5a3178..8b775b0faa 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -185,14 +185,6 @@ bool AnalysisPredictor::PrepareExecutor() {
   return true;
 }
 
-void AnalysisPredictor::SetMkldnnThreadID(int tid) {
-#ifdef PADDLE_WITH_MKLDNN
-  platform::set_cur_thread_id(tid);
-#else
-  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-#endif
-}
-
 bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                            std::vector<PaddleTensor> *output_data,
                            int batch_size) {
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index b5e134ced7..551ca5ba0c 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -80,8 +80,6 @@ class AnalysisPredictor : public PaddlePredictor {
   framework::Scope *scope() { return scope_.get(); }
   framework::ProgramDesc &program() { return *inference_program_; }
 
-  void SetMkldnnThreadID(int tid);
-
   std::string GetSerializedProgram() const override;
 
   bool MkldnnQuantize();
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
index 19c38270b7..ccb50d4043 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
+++
b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
@@ -144,7 +144,7 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   int test_data_batch_size = test_data[0][0].shape[0];
   auto iterations = test_data.size();
   PADDLE_ENFORCE(
-      static_cast<int>(num_images) <= iterations * test_data_batch_size,
+      static_cast<size_t>(num_images) <= iterations * test_data_batch_size,
       "The requested quantization warmup data size " +
           std::to_string(num_images) + " is bigger than all test data size.");
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 05936458ce..61cf10c317 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -418,12 +418,6 @@ void TestMultiThreadPrediction(
       // The inputs of each thread are all the same.
       std::vector<std::vector<PaddleTensor>> outputs_tid;
       auto &predictor = predictors[tid];
-#ifdef PADDLE_WITH_MKLDNN
-      if (use_analysis) {
-        static_cast<AnalysisPredictor *>(predictor.get())
-            ->SetMkldnnThreadID(static_cast<int>(tid) + 1);
-      }
-#endif
       if (FLAGS_warmup) {
         PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads,
                         tid);
-- 
GitLab