From 46b8d282dcdc4c595fda865dc94bc459da9fc282 Mon Sep 17 00:00:00 2001
From: Pei Yang
Date: Mon, 23 Mar 2020 17:04:29 +0800
Subject: [PATCH] Add some inference API comments for AnalysisConfig (#23117)

* add some API comments in paddle_analysis_config.h, test=develop

* add some API comments in paddle_analysis_config.h, test=develop
---
 .../inference/api/paddle_analysis_config.h    | 90 ++++++++++++++++---
 1 file changed, 76 insertions(+), 14 deletions(-)

diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index 7a5ff0318b..d45c706334 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -11,6 +11,17 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+
+///
+/// \file paddle_analysis_config.h
+///
+/// \brief Paddle Analysis Config API information
+///
+/// \author paddle-infer@baidu.com
+/// \date 2020-03-20
+/// \since 1.7
+///
+
 #pragma once
 
 #include <cassert>
@@ -36,34 +47,85 @@ namespace paddle {
 
 class AnalysisPredictor;
 struct MkldnnQuantizerConfig;
 
-// NOTE WIP, not stable yet.
+///
+/// \brief Configuration manager for `AnalysisPredictor`.
+/// \since 1.7.0
+///
+/// `AnalysisConfig` manages the configuration of `AnalysisPredictor`.
+/// During inference, many parameters (model/params path, place of inference,
+/// etc.) need to be specified, and various optimizations (subgraph fusion,
+/// memory optimization, TensorRT engine, etc.) can be applied. Users manage
+/// these settings by creating and modifying an `AnalysisConfig` and loading
+/// it into `AnalysisPredictor`.
+///
 struct AnalysisConfig {
   AnalysisConfig() = default;
+  ///
+  /// \brief Construct a new `AnalysisConfig` from another `AnalysisConfig`.
+  ///
+  /// \param[in] other another `AnalysisConfig`.
+  ///
   explicit AnalysisConfig(const AnalysisConfig& other);
+  ///
+  /// \brief Construct a new `AnalysisConfig` from a non-combined model.
+  ///
+  /// \param[in] model_dir model directory of the non-combined model.
+  ///
   explicit AnalysisConfig(const std::string& model_dir);
+  ///
+  /// \brief Construct a new `AnalysisConfig` from a combined model.
+  ///
+  /// \param[in] prog_file model file path of the combined model.
+  /// \param[in] params_file params file path of the combined model.
+  ///
   explicit AnalysisConfig(const std::string& prog_file,
                           const std::string& params_file);
+  ///
+  /// \brief Precision of inference in TensorRT.
+  ///
   enum class Precision {
-    kFloat32 = 0,
-    kInt8,
-    kHalf,
+    kFloat32 = 0,  ///< fp32
+    kInt8,         ///< int8
+    kHalf,         ///< fp16
   };
 
-  /** Set model with a directory.
-   */
+  ///
+  /// \brief Set the non-combined model directory path.
+  ///
+  /// \param model_dir model directory path.
+  ///
   void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
-  /** Set model with two specific pathes for program and parameters.
-   */
+
+  ///
+  /// \brief Set the combined model with two specific paths for the program
+  /// and parameters.
+  ///
+  /// \param prog_file_path model file path of the combined model.
+  /// \param params_file_path params file path of the combined model.
+  ///
   void SetModel(const std::string& prog_file_path,
                 const std::string& params_file_path);
-  /** Set program file path.
-   */
+  ///
+  /// \brief Set the model file path of a combined model.
+  ///
+  /// \param x model file path.
+  ///
   void SetProgFile(const std::string& x) { prog_file_ = x; }
-  /** Set parameter composed file path.
-   */
+  ///
+  /// \brief Set the params file path of a combined model.
+  ///
+  /// \param x params file path.
+  ///
   void SetParamsFile(const std::string& x) { params_file_ = x; }
-  /** Set opt cache dir.
-   */
+
+  ///
+  /// \brief Set the path of the optimization cache directory.
+  ///
+  /// \param opt_cache_dir the path of the optimization cache directory.
+  ///
   void SetOptimCacheDir(const std::string& opt_cache_dir) {
     opt_cache_dir_ = opt_cache_dir;
   }
--
GitLab
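
A minimal usage sketch of the configure-then-create flow these comments
describe, assuming the public `paddle_inference_api.h` header and the
`CreatePaddlePredictor` factory from the same inference API; the model and
cache paths below are hypothetical:

    #include <memory>

    #include "paddle_inference_api.h"

    int main() {
      // Non-combined model: a directory holding __model__ plus one file per
      // parameter (hypothetical path).
      paddle::AnalysisConfig config("./mobilenet_v1");

      // Combined-model alternative: one program file plus one params file.
      // config.SetModel("./model/__model__", "./model/params");

      // Optionally cache optimized programs between runs (hypothetical path).
      config.SetOptimCacheDir("./opt_cache");

      // Hand the finished config to the predictor factory.
      std::unique_ptr<paddle::PaddlePredictor> predictor =
          paddle::CreatePaddlePredictor(config);
      return predictor != nullptr ? 0 : 1;
    }

The `Precision` enum documented above follows the same pattern: it is passed
to the TensorRT-related setters on `AnalysisConfig` before the predictor is
created.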