diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6d60e40707f67bcb94a8ddcd86171825511cc471..adc88409b6a6cfdfc43a9bbe2af3f531a4c6fe75 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -234,6 +234,11 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
     tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
   }
   tensor_array_batch_cleaner_.ResetNoTensorVars();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }
 
@@ -586,6 +591,11 @@ bool AnalysisPredictor::ZeroCopyRun() {
   // Fix TensorArray reuse not cleaned bug.
   tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
   tensor_array_batch_cleaner_.ResetTensorArray();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }