diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index ebe56734c6d6a9137e877f8a5ea188f59a1b4f98..d998533977566e9d9c8e67921e9f14feea29e95b 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -284,6 +284,7 @@ bool AnalysisPredictor::GetFetch(std::vector<PaddleTensor> *outputs,
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto type = fetch.type();
     auto output = &(outputs->at(i));
+    output->name = fetchs_[idx]->Input("X")[0];
     if (type == typeid(float)) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index db57812bc3ba8e6d578e665524cb5749e6bfecd6..12ecb7c15e92c3efcdb27a7058e9481a6f476674 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -109,7 +109,7 @@ class AnalysisPredictor : public PaddlePredictor {
   std::map<std::string, size_t> feed_names_;
   std::vector<framework::OpDesc *> fetchs_;
   // Memory buffer for feed inputs. The temporary LoDTensor will cause serious
-  // concurrency problems, so cache them.
+  // concurrency problems, wrong results and memory leak, so cache them.
   std::vector<framework::LoDTensor> feed_tensors_;
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc
index a7cef426d17908bfae59a4a5a204237ab580a5a1..9c5703c91fedcbb7807c29eac9a52b30b8b4330a 100644
--- a/paddle/fluid/inference/api/api_impl.cc
+++ b/paddle/fluid/inference/api/api_impl.cc
@@ -185,8 +185,12 @@ bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                << inputs.size();
     return false;
   }
+
+  // Cache the inputs memory for better concurrency performance.
+  feed_tensors_.resize(inputs.size());
+
   for (size_t i = 0; i < inputs.size(); ++i) {
-    framework::LoDTensor input;
+    auto &input = feed_tensors_[i];
     framework::DDim ddim = framework::make_ddim(inputs[i].shape);
     void *input_ptr;
     if (inputs[i].dtype == PaddleDType::INT64) {
@@ -261,6 +265,7 @@ bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
         framework::GetFetchVariable(*scope, "fetch", idx);
     auto type = fetch.type();
     auto output = &(outputs->at(i));
+    output->name = fetchs_[idx]->Input("X")[0];
     if (type == typeid(float)) {
       GetFetchOne<float>(fetch, output);
       output->dtype = PaddleDType::FLOAT32;
diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h
index 9dfa48d501f17fa654ec50049608b1a87c586cb6..c1fcd198ccda07bb6cdd9911716be911ffef6e8d 100644
--- a/paddle/fluid/inference/api/api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -69,6 +69,9 @@ class NativePaddlePredictor : public PaddlePredictor {
   std::vector<framework::OpDesc *> feeds_;
   std::map<std::string, size_t> feed_names_;
   std::vector<framework::OpDesc *> fetchs_;
+  // Memory buffer for feed inputs. The temporary LoDTensor will cause serious
+  // concurrency problems, wrong results and memory leak, so cache them.
+  std::vector<framework::LoDTensor> feed_tensors_;
   // Do not use unique_ptr, use parent scope to delete
   framework::Scope *sub_scope_{nullptr};
   details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
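
Usage note (not part of the patch): after this change, each output PaddleTensor returned by Run() carries the name of its fetch variable, and repeated SetFeed() calls reuse the cached feed_tensors_ instead of allocating a temporary LoDTensor per call. The sketch below shows how a caller might read the populated name field; the model directory and input shape are hypothetical, and it assumes the public Paddle inference API from paddle_inference_api.h (NativeConfig, PaddleTensor, PaddleBuf, CreatePaddlePredictor).

#include <iostream>
#include <vector>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::NativeConfig config;
  config.model_dir = "./mobilenet";  // hypothetical model directory
  config.use_gpu = false;

  auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);

  // Prepare one float32 feed; shape [1, 3, 224, 224] is only an example.
  std::vector<float> buffer(1 * 3 * 224 * 224, 0.f);
  paddle::PaddleTensor input;
  input.shape = {1, 3, 224, 224};
  input.dtype = paddle::PaddleDType::FLOAT32;
  input.data = paddle::PaddleBuf(buffer.data(), buffer.size() * sizeof(float));

  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({input}, &outputs);

  // output.name is filled from the fetch op's "X" input by this patch, so
  // results can be identified by variable name instead of fetch position.
  for (const auto &output : outputs) {
    std::cout << "fetched variable: " << output.name << std::endl;
  }
  return 0;
}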