diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index fcab1ab186127e40701da9420426b4ef27c7f95d..a84c909b3b7287ddc56dce8df6db3c91c338ecfa 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -192,9 +192,7 @@ void AnalysisPredictor::SetMkldnnThreadID(int tid) {
 bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                             std::vector<PaddleTensor> *output_data,
                             int batch_size) {
-  if (UNLIKELY(config_.cpu_math_library_num_threads() > 1)) {
-    paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
-  }
+  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
   VLOG(3) << "Predictor::predict";
   inference::Timer timer;
   timer.tic();
@@ -569,6 +567,7 @@ std::unique_ptr<ZeroCopyTensor> AnalysisPredictor::GetOutputTensor(
 }
 
 bool AnalysisPredictor::ZeroCopyRun() {
+  paddle::platform::SetNumThreads(config_.cpu_math_library_num_threads());
   executor_->Run();
   // Fix TensorArray reuse not cleaned bug.
   tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
diff --git a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
index 9443b08063b8f61d3d6b291a7217d645d8825c54..cc31ab9588da01679b45c2bd4215f5eebd8447d1 100644
--- a/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_pyramid_dnn_tester.cc
@@ -107,6 +107,7 @@ void SetConfig(AnalysisConfig *cfg) {
   cfg->DisableGpu();
   cfg->SwitchSpecifyInputNames();
   cfg->SwitchIrOptim();
+  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
   if (FLAGS_zero_copy) {
     cfg->SwitchUseFeedFetchOps(false);
   }