diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 585634fae9c85f77cc77d774ac166891014a025c..3917b9b65b5905be38ba8a236aa158f42586c825 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -561,6 +561,7 @@ AnalysisPredictor::~AnalysisPredictor() {
 }
 
 std::unique_ptr<PaddlePredictor> AnalysisPredictor::Clone() {
+  std::lock_guard<std::mutex> lk(clone_mutex_);
   auto *x = new AnalysisPredictor(config_);
   x->Init(scope_, inference_program_);
   return std::unique_ptr<PaddlePredictor>(x);
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index a6e126c5d533f4299ccc3deed7d116cabc71f75b..6ca4b5e9bed7505fca3b833dfbb7026ff550d258 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -115,6 +115,8 @@ class AnalysisPredictor : public PaddlePredictor {
   // concurrency problems, wrong results and memory leak, so cache them.
   std::vector<framework::LoDTensor> feed_tensors_;
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
+  // A mutex to help make Clone thread safe.
+  std::mutex clone_mutex_;
 
  private:
   // Some status here that help to determine the status inside the predictor.
diff --git a/paddle/fluid/inference/api/analysis_predictor_tester.cc b/paddle/fluid/inference/api/analysis_predictor_tester.cc
index 6169e60541e4a14d560e719d56624b3219dbcefd..3df26cde3d5defac97074c9bc4086e81f9ec0c93 100644
--- a/paddle/fluid/inference/api/analysis_predictor_tester.cc
+++ b/paddle/fluid/inference/api/analysis_predictor_tester.cc
@@ -179,8 +179,9 @@ TEST(AnalysisPredictor, Clone) {
     threads.emplace_back([&predictors, &inputs, i] {
       LOG(INFO) << "thread #" << i << " running";
       std::vector<PaddleTensor> outputs;
+      auto predictor = predictors.front()->Clone();
       for (int j = 0; j < 10; j++) {
-        ASSERT_TRUE(predictors[i]->Run(inputs, &outputs));
+        ASSERT_TRUE(predictor->Run(inputs, &outputs));
       }
     });
   }
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index e5189e958bdf6afe1f94dc9e2d721e3d2c8b904c..e18bc02d92eb517fa20dc83811694b8ac80ae316 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -161,6 +161,8 @@ bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
 }
 
 std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
+  std::lock_guard<std::mutex> lk(clone_mutex_);
+  VLOG(3) << "Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));
   // Hot fix the bug that result diff in multi-thread.
   // TODO(Superjomn) re-implement a real clone here.
diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h
index d2133bd467376c723a80a98725ac7c70234c54b0..96b94777304382a9d4be115a84f80ead69249863 100644
--- a/paddle/fluid/inference/api/api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -74,6 +74,8 @@ class NativePaddlePredictor : public PaddlePredictor {
   // Do not use unique_ptr, use parent scope to delete
   framework::Scope *sub_scope_{nullptr};
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
+  // A mutex to make Clone thread safe.
+  std::mutex clone_mutex_;
 };
 
 }  // namespace paddle
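
The updated test illustrates the intended usage: each thread clones its own predictor from a shared one instead of all threads sharing a single `Run()`-ing predictor. Below is a minimal sketch of that pattern, not part of the patch; the `RunConcurrently` helper, its parameters, and the input handling are illustrative only, while `Clone()` and `Run()` follow the public Paddle inference API in `paddle_inference_api.h`.

```cpp
#include <memory>
#include <thread>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

// Hypothetical helper: fan out inference over `num_threads` worker threads,
// each running on its own clone of a shared predictor.
void RunConcurrently(paddle::PaddlePredictor *main_predictor,
                     const std::vector<paddle::PaddleTensor> &inputs,
                     int num_threads) {
  std::vector<std::thread> threads;
  for (int i = 0; i < num_threads; ++i) {
    threads.emplace_back([main_predictor, &inputs] {
      // Clone() is now serialized internally by clone_mutex_, so concurrent
      // clones of the same predictor no longer race.
      auto predictor = main_predictor->Clone();
      std::vector<paddle::PaddleTensor> outputs;
      // Each thread runs only on its private copy.
      predictor->Run(inputs, &outputs);
    });
  }
  for (auto &t : threads) t.join();
}
```

Guarding only `Clone()` with `clone_mutex_` keeps the per-inference path lock-free: once each thread owns its clone, `Run()` calls never contend, and the mutex is touched only during the comparatively rare clone step.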