From 79bda2c46d265f6d064d7e176535d1fdcbb055c6 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Mon, 4 Jun 2018 22:11:08 +0800
Subject: [PATCH] enable infer api with multi-threads

---
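This patch lets one predictor serve multiple threads: the first predictor
owns the global framework::Scope through a shared_ptr, each Clone() re-runs
Init() with that scope and gets a private sub-scope inside it, and the
destructor asks the parent scope to delete the sub-scope (hence the raw
sub_scope_ pointer). A minimal usage sketch under those assumptions
follows; the helper name RunMultiThreaded and the thread layout are
illustrative, while Clone() and Run() are the public PaddlePredictor API
touched by this patch. Clones are made on the calling thread, since nothing
here makes Clone() itself safe to call concurrently.

    #include <memory>
    #include <thread>
    #include <vector>

    #include "paddle/contrib/inference/paddle_inference_api.h"

    // Illustrative helper: run the same batch on `num_threads` threads.
    // Clones are created up front on the calling thread.
    void RunMultiThreaded(paddle::PaddlePredictor *main_predictor,
                          const std::vector<paddle::PaddleTensor> &inputs,
                          int num_threads) {
      std::vector<std::unique_ptr<paddle::PaddlePredictor>> predictors;
      for (int i = 0; i < num_threads; ++i) {
        // Each clone shares the parent scope (model weights) but owns a
        // private sub-scope for its intermediate variables.
        predictors.emplace_back(main_predictor->Clone());
      }
      std::vector<std::thread> workers;
      for (int i = 0; i < num_threads; ++i) {
        workers.emplace_back([&predictors, &inputs, i] {
          std::vector<paddle::PaddleTensor> outputs;
          predictors[i]->Run(inputs, &outputs);
        });
      }
      for (auto &w : workers) w.join();
    }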
 .../contrib/inference/paddle_inference_api.h  |  3 +-
 .../inference/paddle_inference_api_impl.cc    | 41 +++++++++++--------
 .../inference/paddle_inference_api_impl.h     |  9 ++--
 3 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/paddle/contrib/inference/paddle_inference_api.h b/paddle/contrib/inference/paddle_inference_api.h
index b5cd0d603f..c4588cf040 100644
--- a/paddle/contrib/inference/paddle_inference_api.h
+++ b/paddle/contrib/inference/paddle_inference_api.h
@@ -63,6 +63,7 @@ class PaddlePredictor {
   struct Config;
   PaddlePredictor() = default;
   PaddlePredictor(const PaddlePredictor&) = delete;
+  PaddlePredictor& operator=(const PaddlePredictor&) = delete;

   // Predict an record.
   // The caller should be responsible for allocating and releasing the memory of
@@ -76,7 +77,7 @@ class PaddlePredictor {
   virtual std::unique_ptr<PaddlePredictor> Clone() = 0;

   // Destroy the Predictor.
-  virtual ~PaddlePredictor() {}
+  virtual ~PaddlePredictor() = default;

   // The common configs for all the predictors.
   struct Config {
diff --git a/paddle/contrib/inference/paddle_inference_api_impl.cc b/paddle/contrib/inference/paddle_inference_api_impl.cc
index b52a43a463..2d710727e2 100644
--- a/paddle/contrib/inference/paddle_inference_api_impl.cc
+++ b/paddle/contrib/inference/paddle_inference_api_impl.cc
@@ -54,7 +54,7 @@ std::string num2str(T a) {
 }
 }  // namespace

-bool NativePaddlePredictor::Init() {
+bool NativePaddlePredictor::Init(std::shared_ptr<framework::Scope> scope) {
   VLOG(3) << "Predictor::init()";

   if (config_.use_gpu) {
@@ -62,9 +62,15 @@
   } else {
     place_ = paddle::platform::CPUPlace();
   }
-  paddle::framework::InitDevices(false);
+  if (scope) {
+    scope_ = scope;
+    sub_scope_ = &(scope->NewScope());
+  } else {
+    paddle::framework::InitDevices(false);
+    scope_.reset(new paddle::framework::Scope());
+  }
+
   executor_.reset(new paddle::framework::Executor(place_));
-  scope_.reset(new paddle::framework::Scope());

   // Initialize the inference program
   if (!config_.model_dir.empty()) {
@@ -83,13 +89,8 @@
     return false;
   }
   ctx_ = executor_->Prepare(*inference_program_, 0);
-
-  // Create temporary variables first, so that the first batch do not need to
-  // create variables in the runtime. This is the logics of the old inference
-  // API.
-  // TODO(Superjomn) this should be modified when `Clone` is valid for
-  // multi-thread application.
-  executor_->CreateVariables(*inference_program_, scope_.get(), 0);
+  executor_->CreateVariables(
+      *inference_program_, sub_scope_ ? sub_scope_ : scope_.get(), 0);

   // Get the feed_target_names and fetch_target_names
   feed_target_names_ = inference_program_->GetFeedTargetNames();
@@ -97,6 +98,13 @@
   return true;
 }

+NativePaddlePredictor::~NativePaddlePredictor() {
+  if (sub_scope_) {
+    PADDLE_ENFORCE_NOT_NULL(scope_, "Should have parent scope!");
+    scope_->DeleteScope(sub_scope_);
+  }
+}
+
 bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                 std::vector<PaddleTensor> *output_data) {
   VLOG(3) << "Predictor::predict";
@@ -121,11 +129,12 @@
   }
   // Run the inference program
   // if share variables, we need not create variables
-  executor_->RunPreparedContext(ctx_.get(),
-                                scope_.get(),
-                                &feed_targets,
-                                &fetch_targets,
-                                false /* don't create variable eatch time */);
+  executor_->RunPreparedContext(
+      ctx_.get(),
+      sub_scope_ != nullptr ? sub_scope_ : scope_.get(),
+      &feed_targets,
+      &fetch_targets,
+      false /* don't create variable each time */);
   if (!GetFetch(fetchs, output_data)) {
     LOG(ERROR) << "fail to get fetchs";
     return false;
@@ -138,7 +147,7 @@ std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
   VLOG(3) << "Predictor::clone";
   std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

-  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init()) {
+  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
     LOG(ERROR) << "fail to call Init";
     return nullptr;
   }
diff --git a/paddle/contrib/inference/paddle_inference_api_impl.h b/paddle/contrib/inference/paddle_inference_api_impl.h
index 84707e223d..dfb34a8631 100644
--- a/paddle/contrib/inference/paddle_inference_api_impl.h
+++ b/paddle/contrib/inference/paddle_inference_api_impl.h
@@ -34,14 +34,15 @@ class NativePaddlePredictor : public PaddlePredictor {
   explicit NativePaddlePredictor(const NativeConfig &config)
       : config_(config) {}

-  bool Init();
+  // Will only create a sub scope if a global scope is given.
+  bool Init(std::shared_ptr<framework::Scope> scope = nullptr);

   bool Run(const std::vector<PaddleTensor> &inputs,
            std::vector<PaddleTensor> *output_data) override;

   std::unique_ptr<PaddlePredictor> Clone() override;

-  ~NativePaddlePredictor() override{};
+  ~NativePaddlePredictor() override;

  private:
   bool SetFeed(const std::vector<PaddleTensor> &input_datas,
@@ -52,11 +53,13 @@ class NativePaddlePredictor : public PaddlePredictor {
   NativeConfig config_;
   platform::Place place_;
   std::unique_ptr<framework::Executor> executor_;
-  std::unique_ptr<framework::Scope> scope_;
+  std::shared_ptr<framework::Scope> scope_;
   std::unique_ptr<framework::ExecutorPrepareContext> ctx_;
   std::unique_ptr<framework::ProgramDesc> inference_program_;
   std::vector<std::string> feed_target_names_;
   std::vector<std::string> fetch_target_names_;
+  // Do not use unique_ptr; the parent scope is responsible for deleting it.
+  framework::Scope *sub_scope_{nullptr};
 };

 }  // namespace paddle
--
GitLab