From e84234b55116fa93d9f776bb752613c6055f2df3 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Thu, 17 Jan 2019 18:45:14 +0800
Subject: [PATCH] make clone thread safe (#15363)

---
 paddle/fluid/inference/api/analysis_predictor.cc        | 1 +
 paddle/fluid/inference/api/analysis_predictor.h         | 2 ++
 paddle/fluid/inference/api/analysis_predictor_tester.cc | 3 ++-
 paddle/fluid/inference/api/api_impl.cc                  | 2 ++
 paddle/fluid/inference/api/api_impl.h                   | 2 ++
 5 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 585634fae9c..3917b9b65b5 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -561,6 +561,7 @@ AnalysisPredictor::~AnalysisPredictor() {
 }
 
 std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
+  std::lock_guard<std::mutex> lk(clone_mutex_);
   auto *x = new AnalysisPredictor(config_);
   x->Init(scope_, inference_program_);
   return std::unique_ptr<PaddlePredictor>(x);
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index a6e126c5d53..6ca4b5e9bed 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -115,6 +115,8 @@ class AnalysisPredictor : public PaddlePredictor {
   // concurrency problems, wrong results and memory leak, so cache them.
   std::vector<framework::LoDTensor> feed_tensors_;
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
+  // A mutex to help make Clone thread safe.
+  std::mutex clone_mutex_;
 
  private:
   // Some status here that help to determine the status inside the predictor.
diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc
index 6169e60541e..3df26cde3d5 100644
--- a/paddle/fluid/inference/api/analysis_predictor_tester.cc
+++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -179,8 +179,9 @@ TEST(AnalysisPredictor, Clone) {
     threads.emplace_back([&predictors, &inputs, i] {
       LOG(INFO) << "thread #" << i << " running";
       std::vector<PaddleTensor> outputs;
+      auto predictor = predictors.front()->Clone();
       for (int j = 0; j < 10; j++) {
-        ASSERT_TRUE(predictors[i]->Run(inputs, &outputs));
+        ASSERT_TRUE(predictor->Run(inputs, &outputs));
       }
     });
   }
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index e5189e958bd..e18bc02d92e 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -161,6 +161,8 @@ bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
 }
 
 std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
+  std::lock_guard<std::mutex> lk(clone_mutex_);
+  VLOG(3) << "Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h
index d2133bd4673..96b94777304 100644
--- a/paddle/fluid/inference/api/api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -74,6 +74,8 @@ class NativePaddlePredictor : public PaddlePredictor {
   // Do not use unique_ptr, use parent scope to delete
   framework::Scope *sub_scope_{nullptr};
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
+  // A mutex to make Clone thread safe.
+  std::mutex clone_mutex_;
 };
 
 }  // namespace paddle
-- 
GitLab
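
The pattern this patch applies may be worth spelling out: Clone() is serialized with a per-predictor mutex so that concurrent clones cannot race on the shared scope and inference program while a new predictor initializes from them, and each worker thread then runs inference on its own clone with no further locking. Below is a minimal stand-alone sketch of that pattern in plain C++. The Predictor class here is a hypothetical stand-in, not Paddle's real API; only the locking structure mirrors the patch.

#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

// Hypothetical stand-in for AnalysisPredictor; only the locking mirrors the patch.
class Predictor {
 public:
  static std::unique_ptr<Predictor> Create() {
    return std::unique_ptr<Predictor>(new Predictor());
  }

  // Serialize Clone() so concurrent callers cannot race while a new
  // predictor initializes from this one's shared state.
  std::unique_ptr<Predictor> Clone() {
    std::lock_guard<std::mutex> lk(clone_mutex_);
    return std::unique_ptr<Predictor>(new Predictor(*this));
  }

  // Each thread runs on its own clone, so no lock is needed here.
  bool Run(int input, int *output) {
    *output = input * 2;  // placeholder for real inference
    return true;
  }

 private:
  Predictor() = default;
  Predictor(const Predictor &) {}  // the mutex itself is not copied

  std::mutex clone_mutex_;
};

int main() {
  auto main_predictor = Predictor::Create();
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i) {
    // Cloning is the only serialized step; Run() needs no lock per thread.
    threads.emplace_back([&main_predictor, i] {
      auto predictor = main_predictor->Clone();
      int out = 0;
      for (int j = 0; j < 10; ++j) {
        predictor->Run(i, &out);
      }
      std::cout << "thread #" << i << " out=" << out << std::endl;
    });
  }
  for (auto &t : threads) {
    t.join();
  }
  return 0;
}

This is the same structure the tester change above exercises: every thread calls Clone() on the one shared predictor and then runs on its private copy, instead of pre-building one predictor per thread.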