From e089e454a12bb59d87e5eac05fc9f4ccdd39f952 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Mon, 3 Jun 2019 18:34:50 +0800
Subject: [PATCH] make omp thread num default 1 after inference run (#17801)

test=develop
---
 paddle/fluid/inference/api/analysis_predictor.cc | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 6d60e4070..adc88409b 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -234,6 +234,11 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
     tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
   }
   tensor_array_batch_cleaner_.ResetNoTensorVars();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }
 
@@ -586,6 +591,11 @@ bool AnalysisPredictor::ZeroCopyRun() {
   // Fix TensorArray reuse not cleaned bug.
   tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
   tensor_array_batch_cleaner_.ResetTensorArray();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }
--
GitLab
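
For readers integrating this change, here is a minimal sketch of how a CPU deployment service typically drives the predictor around this fix. It assumes the 1.x-era C++ inference API (paddle::AnalysisConfig, paddle::CreatePaddlePredictor); the model path, input name, and shape below are hypothetical placeholders, not part of the patch.

    // Minimal sketch, assuming the Paddle 1.x C++ inference API.
    // Model path, input name, and shape are hypothetical placeholders.
    #include <vector>
    #include "paddle_inference_api.h"

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("/path/to/model");  // hypothetical model directory
      // Ask the CPU math library (MKL/OpenBLAS via OpenMP) for 4 threads
      // while this predictor runs.
      config.SetCpuMathLibraryNumThreads(4);

      auto predictor = paddle::CreatePaddlePredictor(config);

      paddle::PaddleTensor input;
      input.name = "x";                   // hypothetical input name
      input.shape = {1, 3, 224, 224};
      input.dtype = paddle::PaddleDType::FLOAT32;
      input.data.Resize(1 * 3 * 224 * 224 * sizeof(float));
      // (fill input.data.data() with real values before running)

      std::vector<paddle::PaddleTensor> inputs{input}, outputs;
      // Run() uses the configured thread count internally; with this patch
      // it calls paddle::platform::SetNumThreads(1) before returning.
      predictor->Run(inputs, &outputs);
      return 0;
    }

The effect of the patch is visible at the end of Run(): whatever thread count the service configured for the math library during inference, the process-wide OpenMP/MKL setting is restored to 1 when Run() returns, so concurrent service threads do not inherit an oversubscribed thread pool.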