From c9e5aa19c1b3d3aa76bd03aca2c4b6570014ea52 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Fri, 18 Jan 2019 08:54:01 +0800
Subject: [PATCH] get tensor API add more comments (#15345)

---
 paddle/fluid/inference/api/paddle_api.h | 28 ++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h
index d9edcf7cc..46b510fd1 100644
--- a/paddle/fluid/inference/api/paddle_api.h
+++ b/paddle/fluid/inference/api/paddle_api.h
@@ -170,18 +170,40 @@ class PaddlePredictor {
                    std::vector<PaddleTensor>* output_data,
                    int batch_size = -1) = 0;
 
-  /** Zero copy input and output optimization.
-   * Get the input or output tensors, and operate on their memory directly,
-   * without copy.
+  /** \brief Get a mutable tensor directly.
+   *
+   * NOTE Only works in AnalysisPredictor.
+   *
+   * One can also use this to modify any temporary variable related tensors in
+   * the predictor.
+   *
    */
   virtual std::unique_ptr<ZeroCopyTensor> GetInputTensor(
       const std::string& name) {
     return nullptr;
   }
+  /**
+   * \brief Get an immutable tensor without copy.
+   *
+   * NOTE Only works in AnalysisPredictor.
+   * One can use this API to get any temporary tensors in the predictor and
+   * read it.
+   */
   virtual std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
       const std::string& name) {
     return nullptr;
   }
+  /**
+   * \brief Run the predictor with zero-copied inputs and outputs.
+   *
+   * NOTE Only works in AnalysisPredictor.
+   *
+   * This will save the IO copy for transferring inputs and outputs to predictor
+   * workspace and get some performance improvement.
+   * To use it, one should call the `AnalysisConfig.SwitchUseFeedFetchOp(true)`
+   * and then use the `GetInputTensor` and `GetOutputTensor` to directly write
+   * or read the input/output tensors.
+   */
   virtual bool ZeroCopyRun() { return false; }
 
   /** Clone a predictor that share the model weights, the Cloned predictor
-- 
GitLab