From dbc2bb337641b78679d29e4aeb581c03aabeb5fa Mon Sep 17 00:00:00 2001
From: liu zhengxi <380185688@qq.com>
Date: Thu, 17 Oct 2019 09:57:50 +0800
Subject: [PATCH] improve the performance of capi in PD_PredictorRun (#20665)

---
 paddle/fluid/inference/capi/pd_predictor.cc | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/inference/capi/pd_predictor.cc b/paddle/fluid/inference/capi/pd_predictor.cc
index 89d4c41537..bb75052480 100644
--- a/paddle/fluid/inference/capi/pd_predictor.cc
+++ b/paddle/fluid/inference/capi/pd_predictor.cc
@@ -29,7 +29,13 @@ bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
                      int in_size, PD_Tensor* output_data, int** out_size,
                      int batch_size) {
   PADDLE_ENFORCE_NOT_NULL(config);
-  auto predictor = paddle::CreatePaddlePredictor(config->config);
+  static std::map<std::string, std::unique_ptr<paddle::PaddlePredictor>>
+      predictors;
+  if (!predictors.count(config->config.model_dir())) {
+    predictors[config->config.model_dir()] =
+        paddle::CreatePaddlePredictor(config->config);
+  }
+  auto& predictor = predictors[config->config.model_dir()];
   std::vector<paddle::PaddleTensor> in;
   for (int i = 0; i < in_size; ++i) {
     in.emplace_back(inputs->tensor);
--
GitLab
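
Note: the change above swaps per-call predictor construction for a process-wide cache keyed by the model directory, so repeated PD_PredictorRun calls on the same model reuse one paddle::PaddlePredictor instead of rebuilding it each time. The standalone C++ sketch below illustrates the same memoization pattern in isolation; Predictor, CreatePredictor, and GetPredictor here are hypothetical stand-ins for illustration, not Paddle APIs.

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-in for an object that is expensive to construct
// (analogous to loading a model and building a predictor).
struct Predictor {
  explicit Predictor(const std::string& dir) : model_dir(dir) {}
  std::string model_dir;
};

std::unique_ptr<Predictor> CreatePredictor(const std::string& model_dir) {
  std::cout << "creating predictor for " << model_dir << "\n";  // the costly step
  return std::unique_ptr<Predictor>(new Predictor(model_dir));
}

// Create on first use, then reuse: the same pattern the patch applies
// to paddle::CreatePaddlePredictor inside PD_PredictorRun.
Predictor* GetPredictor(const std::string& model_dir) {
  static std::map<std::string, std::unique_ptr<Predictor>> predictors;
  if (!predictors.count(model_dir)) {
    predictors[model_dir] = CreatePredictor(model_dir);
  }
  return predictors[model_dir].get();
}

int main() {
  GetPredictor("./model_a");  // constructs a predictor
  GetPredictor("./model_a");  // hits the cache, no construction
  GetPredictor("./model_b");  // constructs a second predictor for the other key
  return 0;
}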