Unverified commit 46b8d282 authored by Pei Yang, committed by GitHub

Add some inference API comments for AnalysisConfig (#23117)

* add some API comments in paddle_analysis_config.h, test=develop

* add some API comments in paddle_analysis_config.h, test=develop
Parent 4f5e4540
@@ -11,6 +11,17 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///
/// \file paddle_analysis_config.h
///
/// \brief Paddle Analysis Config API information.
///
/// \author paddle-infer@baidu.com
/// \date 2020-03-20
/// \since 1.7
///
#pragma once
#include <cassert>
@@ -36,34 +47,85 @@ namespace paddle {
class AnalysisPredictor;
struct MkldnnQuantizerConfig;
// NOTE WIP, not stable yet.
///
/// \brief Configuration manager for `AnalysisPredictor`.
/// \since 1.7.0
///
/// `AnalysisConfig` manages configurations of `AnalysisPredictor`.
/// During the inference procedure, there are many parameters (model/params
/// path, place of inference, etc.) to be specified, and various
/// optimizations (subgraph fusion, memory optimization, TensorRT engine,
/// etc.) to be done. Users can manage these settings by creating and
/// modifying an `AnalysisConfig`, and loading it into `AnalysisPredictor`.
///
struct AnalysisConfig {
AnalysisConfig() = default;
///
/// \brief Construct a new `AnalysisConfig` from another
/// `AnalysisConfig`.
///
/// \param[in] other another `AnalysisConfig`
///
explicit AnalysisConfig(const AnalysisConfig& other);
///
/// \brief Construct a new `AnalysisConfig` from a non-combined model.
///
/// \param[in] model_dir model directory of the non-combined model.
///
explicit AnalysisConfig(const std::string& model_dir);
///
/// \brief Construct a new `AnalysisConfig` from a combined model.
///
/// \param[in] prog_file model file path of the combined model.
/// \param[in] params_file params file path of the combined model.
///
explicit AnalysisConfig(const std::string& prog_file,
const std::string& params_file);
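//
// A minimal usage sketch for the three constructors above (the model paths
// here are hypothetical placeholders, not part of this header):
//
//   #include "paddle_inference_api.h"
//
//   paddle::AnalysisConfig dir_cfg("./mobilenet_v1");       // non-combined model
//   paddle::AnalysisConfig file_cfg("./model/__model__",    // combined model
//                                   "./model/__params__");
//   paddle::AnalysisConfig copied_cfg(file_cfg);            // copy an existing config
//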
///
/// \brief Precision of inference in TensorRT.
///
enum class Precision {
kFloat32 = 0,
kInt8,
kHalf,
kFloat32 = 0, ///< fp32
kInt8, ///< int8
kHalf, ///< fp16
};
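//
// Precision is consumed when turning on the TensorRT subgraph engine. A
// minimal sketch, assuming the EnableTensorRtEngine overload shipped with
// this header's version (workspace size, max batch size, min subgraph size,
// precision, use_static, use_calib_mode):
//
//   config.EnableTensorRtEngine(1 << 20, 1, 3,
//                               paddle::AnalysisConfig::Precision::kHalf,
//                               false, false);
//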
/** Set model with a directory.
*/
///
/// \brief Set the non-combined model dir path.
///
/// \param model_dir model dir path.
///
void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
/** Set model with two specific paths for program and parameters.
*/
///
/// \brief Set the combined model with two specific paths for the program
/// and parameters.
///
/// \param prog_file_path model file path of the combined model.
/// \param params_file_path params file path of the combined model.
///
void SetModel(const std::string& prog_file_path,
const std::string& params_file_path);
/** Set program file path.
*/
///
/// \brief Set the model file path of a combined model.
///
/// \param x model file path.
///
void SetProgFile(const std::string& x) { prog_file_ = x; }
/** Set parameter composed file path.
*/
///
/// \brief Set the params file path of a combined model.
///
/// \param x params file path.
///
void SetParamsFile(const std::string& x) { params_file_ = x; }
/** Set opt cache dir.
*/
///
/// \brief Set the path of the optimization cache directory.
///
/// \param opt_cache_dir the path of the optimization cache directory.
///
void SetOptimCacheDir(const std::string& opt_cache_dir) {
opt_cache_dir_ = opt_cache_dir;
}
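//
// Putting the setters above together: a minimal end-to-end sketch (paths are
// hypothetical; CreatePaddlePredictor comes from paddle_inference_api.h):
//
//   paddle::AnalysisConfig config;
//   config.SetModel("./model/__model__", "./model/__params__");
//   config.SetOptimCacheDir("./opt_cache");
//   auto predictor = paddle::CreatePaddlePredictor(config);
//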
......