Unverified commit e089e454, authored by Tao Luo, committed by GitHub

make omp thread num default 1 after inference run (#17801)

test=develop
Parent 6a6bf597
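The motivation: OpenMP/MKL thread counts are process-global state, so a predictor that raises cpu_math_library_num_threads for its own run would otherwise leak that setting into every other thread of a multi-threaded deployment service. Resetting to 1 after each run keeps the process default conservative. Below is a minimal sketch of the caller side, assuming the paddle::AnalysisConfig inference API of this era and a hypothetical model directory "./mobilenet"; header path and exact factory overload may differ by release.

// Sketch: a service handler that requests extra math-library threads for
// its own inference run. "./mobilenet" is a placeholder model directory.
#include <vector>
#include "paddle_inference_api.h"  // include path varies by inference release

int main() {
  paddle::AnalysisConfig config;
  config.SetModel("./mobilenet");         // hypothetical model directory
  config.SetCpuMathLibraryNumThreads(4);  // ask MKL/OpenMP for 4 threads

  auto predictor =
      paddle::CreatePaddlePredictor<paddle::AnalysisConfig>(config);

  std::vector<paddle::PaddleTensor> inputs, outputs;
  // ... fill `inputs` with real request data ...
  predictor->Run(inputs, &outputs);
  // After this patch, Run() resets the process-wide thread count to 1,
  // so the 4-thread setting above does not bleed into other requests.
  return 0;
}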
@@ -234,6 +234,11 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
     tensor_array_batch_cleaner_.CollectNoTensorVars(sub_scope_);
   }
   tensor_array_batch_cleaner_.ResetNoTensorVars();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }
@@ -586,6 +591,11 @@ bool AnalysisPredictor::ZeroCopyRun() {
   // Fix TensorArray reuse not cleaned bug.
   tensor_array_batch_cleaner_.CollectTensorArrays(sub_scope_);
   tensor_array_batch_cleaner_.ResetTensorArray();
+
+  // recover the cpu_math_library_num_threads to 1, in order to avoid thread
+  // conflict when integrating it into deployment service.
+  paddle::platform::SetNumThreads(1);
+
   return true;
 }
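For context, paddle::platform::SetNumThreads is not part of this diff; it forwards the requested count to whichever CPU math backend the build links (OpenBLAS, MKLML, or plain OpenMP). A simplified stand-in assuming an MKL + OpenMP build; the real helper dispatches on build flags and reaches MKL through Paddle's own dynload wrappers:

// Sketch of a SetNumThreads-style helper on an MKL + OpenMP build.
#include <mkl_service.h>  // mkl_set_num_threads
#include <omp.h>          // omp_set_num_threads

namespace demo {

void SetNumThreads(int num_threads) {
  int n = num_threads > 1 ? num_threads : 1;  // clamp to at least one thread
  mkl_set_num_threads(n);  // threads used by MKL GEMM and friends
  omp_set_num_threads(n);  // threads used by OpenMP parallel regions
}

}  // namespace demo

Because both calls mutate process-wide state, calling SetNumThreads(1) at the end of Run() and ZeroCopyRun() keeps one request's tuning from oversubscribing CPUs for every other thread in the service.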