diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 5d9d5a3178aaa39f4b80197fb5ac7cd46504bf4f..8b775b0faa6b2ba58ed2a9f330d9619535a49a3d 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -185,14 +185,6 @@ bool AnalysisPredictor::PrepareExecutor() {
   return true;
 }
 
-void AnalysisPredictor::SetMkldnnThreadID(int tid) {
-#ifdef PADDLE_WITH_MKLDNN
-  platform::set_cur_thread_id(tid);
-#else
-  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-#endif
-}
-
 bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                             std::vector<PaddleTensor> *output_data,
                             int batch_size) {
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index b5e134ced70f8bf9ef0267bee08ec9836aeb5338..551ca5ba0cd6f555c5b01a212a941908d8ce898a 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -80,8 +80,6 @@ class AnalysisPredictor : public PaddlePredictor {
   framework::Scope *scope() { return scope_.get(); }
   framework::ProgramDesc &program() { return *inference_program_; }
 
-  void SetMkldnnThreadID(int tid);
-
   std::string GetSerializedProgram() const override;
 
   bool MkldnnQuantize();
diff --git a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
index 19c38270b70dff92085f2f19b98f71d5cb804d4a..ccb50d404379d0b3aea64d04371ceae0fcf6daef 100644
--- a/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_int8_object_detection_tester.cc
@@ -144,7 +144,7 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   int test_data_batch_size = test_data[0][0].shape[0];
   auto iterations = test_data.size();
   PADDLE_ENFORCE(
-      static_cast<int>(num_images) <= iterations * test_data_batch_size,
+      static_cast<size_t>(num_images) <= iterations * test_data_batch_size,
       "The requested quantization warmup data size " +
           std::to_string(num_images) +
           " is bigger than all test data size.");
diff --git a/paddle/fluid/inference/tests/api/tester_helper.h b/paddle/fluid/inference/tests/api/tester_helper.h
index 05936458cefbadecb0ab15afdc3b48d0fd4d64ce..61cf10c31788be87d14c93a344168088390e9275 100644
--- a/paddle/fluid/inference/tests/api/tester_helper.h
+++ b/paddle/fluid/inference/tests/api/tester_helper.h
@@ -418,12 +418,6 @@ void TestMultiThreadPrediction(
       // The inputs of each thread are all the same.
       std::vector<std::vector<PaddleTensor>> outputs_tid;
       auto &predictor = predictors[tid];
-#ifdef PADDLE_WITH_MKLDNN
-      if (use_analysis) {
-        static_cast<AnalysisPredictor *>(predictor.get())
-            ->SetMkldnnThreadID(static_cast<int>(tid) + 1);
-      }
-#endif
       if (FLAGS_warmup) {
         PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads,
                          tid);