From d09d6eadc0c875cd7f703593d37fb46216ca4400 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Tue, 8 Jan 2019 15:05:00 +0800
Subject: [PATCH] make inference api work with Doxygen (#15195)

---
 .../fluid/inference/api/analysis_predictor.h  |   7 +-
 paddle/fluid/inference/api/api_impl.h         |   1 -
 .../inference/api/paddle_analysis_config.h    | 103 +++++++++-
 paddle/fluid/inference/api/paddle_api.h       | 176 +++++++++++-------
 .../fluid/inference/api/paddle_pass_builder.h |  37 ++--
 5 files changed, 227 insertions(+), 97 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.h b/paddle/fluid/inference/api/analysis_predictor.h
index 12ecb7c15..a6e126c5d 100644
--- a/paddle/fluid/inference/api/analysis_predictor.h
+++ b/paddle/fluid/inference/api/analysis_predictor.h
@@ -35,8 +35,11 @@ using framework::proto::ProgramDesc;
 using framework::NaiveExecutor;
 using contrib::AnalysisConfig;
 
-/* This predictor is based on the original native predictor with IR and Analysis
- * support. It will optimize IR and Parameters in the runtime.
+/** \brief This predictor is based on the original native predictor with IR and
+ * Analysis support.
+ *
+ * It will optimize IR and Parameters in the runtime.
+ *
  * TODO(Superjomn) Replace the Navive predictor?
  */
 class AnalysisPredictor : public PaddlePredictor {
diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h
index c1fcd198c..d2133bd46 100644
--- a/paddle/fluid/inference/api/api_impl.h
+++ b/paddle/fluid/inference/api/api_impl.h
@@ -19,7 +19,6 @@ limitations under the License. */
 #include 
 #include 
 #include 
-
 #include "paddle/fluid/framework/ddim.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/lod_tensor_array.h"
diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index 2d61098f9..ae6ac6985 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -19,6 +19,8 @@
 #include 
 #include 
 
+/*! \file */
+
 // Here we include some header files with relative paths, for that in deploy,
 // the abstract path of this header file will be changed.
 #include "paddle_api.h"  // NOLINT
@@ -41,49 +43,125 @@ struct AnalysisConfig {
   explicit AnalysisConfig(const std::string& prog_file,
                           const std::string& params_file);
 
-  // Model path related.
+  /** Set model with a directory.
+   */
   void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
 
+  /** Set model with two specific paths for the program and the parameters.
+   */
  void SetModel(const std::string& prog_file_path,
                 const std::string& params_file_path);
+  /** Set the program file path.
+   */
   void SetProgFile(const std::string& x) { prog_file_ = x; }
+  /** Set the composed parameters file path.
+   */
   void SetParamsFile(const std::string& x) { params_file_ = x; }
+
+  /** Get the model directory path.
+   */
   const std::string& model_dir() const { return model_dir_; }
+  /** Get the program file path.
+   */
   const std::string& prog_file() const { return prog_file_; }
+  /** Get the composed parameters file path.
+   */
   const std::string& params_file() const { return params_file_; }
 
   // GPU related.
+
+  /**
+   * \brief Turn on GPU.
+   * @param memory_pool_init_size_mb initial size of the GPU memory pool in MB.
+   * @param device_id the GPU card to use (default is 0).
+   */
   void EnableUseGpu(uint64_t memory_pool_init_size_mb, int device_id = 0);
+  /** Turn off the GPU.
+   */
   void DisableGpu();
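A brief usage sketch may help tie the model-path and device setters documented above together. Everything below is illustrative: the directory and file names are assumptions, not values taken from this patch.

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Sketch only: point the config at a model and choose the device.
paddle::contrib::AnalysisConfig MakeDeviceConfig(bool on_gpu) {
  paddle::contrib::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // hypothetical model directory
  // Or, when the program and the parameters live in two separate files:
  // config.SetModel("./model/__model__", "./model/params");
  if (on_gpu) {
    config.EnableUseGpu(100 /* memory pool in MB */, 0 /* device id */);
  } else {
    config.DisableGpu();
  }
  return config;
}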
+  /** A bool state telling whether the GPU is turned on.
+   */
   bool use_gpu() const { return use_gpu_; }
+  /** Get the GPU device id.
+   */
   int gpu_device_id() const { return device_id_; }
+  /** Get the initial size in MB of the GPU memory pool.
+   */
   int memory_pool_init_size_mb() const { return memory_pool_init_size_mb_; }
+  /** Get the proportion of the device memory used for the initial memory pool.
+   */
   float fraction_of_gpu_memory_for_pool() const;
 
-  // Determine whether to perform graph optimization.
+  /** \brief Control whether to perform IR graph optimization.
+   *
+   * If turned off, the AnalysisConfig will act just like a NativeConfig.
+   */
   void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
+  /** A boolean state telling whether the IR graph optimization is activated.
+   */
   bool ir_optim() const { return enable_ir_optim_; }
 
+  /** \brief INTERNAL Determine whether to use the feed and fetch operators.
+   * Just for internal development, not stable yet.
+   * When ZeroCopyTensor is used, this should be turned off.
+   */
   void SwitchUseFeedFetchOps(int x = true) { use_feed_fetch_ops_ = x; }
+  /** A boolean state telling whether to use the feed and fetch operators.
+   */
   bool use_feed_fetch_ops_enabled() const { return use_feed_fetch_ops_; }
 
+  /** \brief Control whether to specify the inputs' names.
+   *
+   * The PaddleTensor type has a `name` member, assign it with the corresponding
+   * variable name. This is used only when the input PaddleTensors passed to the
+   * `PaddlePredictor.Run(...)` cannot follow the order in the training phase.
+   */
   void SwitchSpecifyInputNames(bool x = true) { specify_input_name_ = x; }
+
+  /** A boolean state telling whether the input PaddleTensor names specified
+   * should be used to reorder the inputs in `PaddlePredictor.Run(...)`.
+   */
   bool specify_input_name() const { return specify_input_name_; }
 
+  /**
+   * \brief Turn on the TensorRT engine.
+   *
+   * The TensorRT engine will accelerate some subgraphs in the original Fluid
+   * computation graph. In some models such as ResNet50, GoogleNet and so on,
+   * it gains significant performance acceleration.
+   *
+   * @param workspace_size the memory size (in bytes) used for the TensorRT
+   * workspace.
+   * @param max_batch_size the maximum batch size of this prediction task;
+   * better set as small as possible to avoid performance loss.
+   * @param min_subgraph_size the minimum TensorRT subgraph size needed; if a
+   * subgraph is smaller than this, it will not be transferred to the TensorRT
+   * engine.
+   */
   void EnableTensorRtEngine(int workspace_size = 1 << 20,
                             int max_batch_size = 1, int min_subgraph_size = 3);
+  /** A boolean state telling whether the TensorRT engine is used.
+   */
   bool tensorrt_engine_enabled() const { return use_tensorrt_; }
 
+  /** Control whether to debug the IR graph analysis phase.
+   */
   void SwitchIrDebug(int x = true) { ir_debug_ = x; }
 
+  /** Turn on MKLDNN.
+   */
   void EnableMKLDNN();
+  /** A boolean state telling whether MKLDNN is used.
+   */
   bool mkldnn_enabled() const { return use_mkldnn_; }
 
-  // Set and get the number of cpu math library threads.
+  /** Set the number of cpu math library threads.
+   */
   void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads);
+  /** An int state telling how many threads are used in the CPU math library.
+   */
   int cpu_math_library_num_threads() const {
     return cpu_math_library_num_threads_;
   }
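A minimal sketch of the optimization-related switches documented above. The sizes are illustrative assumptions, and EnableTensorRtEngine is only meaningful once the GPU has been turned on as shown earlier.

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Sketch only: IR optimization, TensorRT, and CPU math threads.
paddle::contrib::AnalysisConfig MakeOptimizedConfig() {
  paddle::contrib::AnalysisConfig config;
  config.SetModel("./mobilenet_v1");  // hypothetical model directory
  config.EnableUseGpu(100, 0);        // TensorRT runs on the GPU path
  config.SwitchIrOptim(true);         // run the IR analysis passes
  config.EnableTensorRtEngine(1 << 20 /* workspace bytes */,
                              1 /* max batch size */);
  config.SetCpuMathLibraryNumThreads(1);
  // An equivalent plain NativeConfig can still be derived when needed:
  // paddle::NativeConfig native = config.ToNativeConfig();
  return config;
}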
+  /** Transform the AnalysisConfig to NativeConfig.
+   */
   NativeConfig ToNativeConfig() const {
     NativeConfig config;
     config.model_dir = model_dir_;
@@ -95,19 +173,30 @@ struct AnalysisConfig {
     config.specify_input_name = specify_input_name_;
     return config;
   }
+  /** Specify the operator type list to use MKLDNN acceleration.
+   * @param op_list the operator type list.
+   */
   void SetMKLDNNOp(std::unordered_set<std::string> op_list) {
     mkldnn_enabled_op_types_ = op_list;
   }
 
-  // Specify the memory buffer of program and parameter
+  /** Specify the memory buffer of the program and the parameters.
+   * @param prog_buffer the memory buffer of the program.
+   * @param prog_buffer_size the size of the data.
+   * @param params_buffer the memory buffer of the composed parameters file.
+   * @param params_buffer_size the size of the composed parameters data.
+   */
   void SetModelBuffer(const char* prog_buffer, size_t prog_buffer_size,
-                      const char* program_buffer, size_t program_buffer_size);
+                      const char* params_buffer, size_t params_buffer_size);
+  /** A boolean state telling whether the model is set from the CPU memory.
+   */
   bool model_from_memory() const { return model_from_memory_; }
 
   friend class ::paddle::AnalysisPredictor;
 
-  // NOTE just for developer, not an official API, easily to be broken.
-  // Get a pass builder for customize the passes in IR analysis phase.
+  /** NOTE just for developers, not an official API, and easily broken.
+   * Get a pass builder to customize the passes in the IR analysis phase.
+   */
   PassStrategy* pass_builder() const;
 
  protected:
diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h
index 1513a4b3b..3642f3612 100644
--- a/paddle/fluid/inference/api/paddle_api.h
+++ b/paddle/fluid/inference/api/paddle_api.h
@@ -13,61 +13,76 @@
 // limitations under the License.
 
 #pragma once
+
+/*! \file paddle_api.h
+ */
+
 #include 
 #include 
 #include 
 #include 
 
+/*! \namespace paddle
+ */
 namespace paddle {
 
-// Data type.
+/** Paddle data type.
+ */
 enum PaddleDType {
   FLOAT32,
   INT64,
   // TODO(Superjomn) support more data types if needed.
 };
 
-/*
- * Memory menage for PaddleTensor.
- * The PaddleBuf holds a buffer for data input or output. The memory can be
- * allocated by user or by PaddleBuf itself, but in any case, the PaddleBuf
- * should be reused for better performance.
+/**
+ * \brief Memory manager for PaddleTensor.
 *
- * For user allocated memory, the following API can be used:
- * - PaddleBuf(void* data, size_t length) to set an external memory by
- * specifying
- *   the memory address and length.
- * - Reset(void* data, size_t length) to reset the PaddleBuf with an external
- * memory.
- * ATTENTION, for user allocated memory, deallocation should be done by users
- * externally after the program finished. The PaddleBuf won't do any allocation
- * or deallocation.
+ *The PaddleBuf holds a buffer for data input or output. The memory can be
+ *allocated by the user or by PaddleBuf itself, but in any case, the PaddleBuf
+ *should be reused for better performance.
 *
- * To have the PaddleBuf allocate and manage the memory:
- * - PaddleBuf(size_t length) will allocate a memory of size `length`.
- * - Resize(size_t length) resize the memory to no less than `length`, ATTENTION
- *   if the allocated memory is larger than `length`, nothing will done.
+ *For user allocated memory, the following API can be used:
+ *- PaddleBuf(void* data, size_t length) to set an external memory by
+ *specifying
+ * the memory address and length.
+ *- Reset(void* data, size_t length) to reset the PaddleBuf with an external
+ *memory.
+ *ATTENTION, for user allocated memory, deallocation should be done by the
+ *user externally after the program finishes. The PaddleBuf won't do any
+ *allocation or deallocation.
+ *
+ *To have the PaddleBuf allocate and manage the memory:
+ *- PaddleBuf(size_t length) will allocate memory of size `length`.
+ *- Resize(size_t length) resizes the memory to no less than `length`.
+ *  ATTENTION: if the allocated memory is larger than `length`, nothing will
+ *  be done.
  */
 class PaddleBuf {
  public:
-  // PaddleBuf allocate memory internally, and manage it.
+  /** PaddleBuf allocates memory internally, and manages it.
+   */
   explicit PaddleBuf(size_t length)
       : data_(new char[length]), length_(length), memory_owned_(true) {}
-  // Set external memory, the PaddleBuf won't manage it.
+  /** Set external memory; the PaddleBuf won't manage it.
+   */
   PaddleBuf(void* data, size_t length)
       : data_(data), length_(length), memory_owned_{false} {}
-  // Copy only available when memory is managed externally.
+  /** Copy only available when memory is managed externally.
+   */
   explicit PaddleBuf(const PaddleBuf&);
 
-  // Resize the memory.
+  /** Resize the memory.
+   */
   void Resize(size_t length);
-  // Reset to external memory, with address and length set.
+  /** Reset to external memory, with address and length set.
+   */
   void Reset(void* data, size_t length);
-  // Tell whether the buffer is empty.
+  /** Tell whether the buffer is empty.
+   */
   bool empty() const { return length_ == 0; }
-  // Get the memory address.
+  /** Get the memory address.
+   */
   void* data() const { return data_; }
-  // Get the memory length.
+  /** Get the memory length.
+   */
   size_t length() const { return length_; }
 
   ~PaddleBuf() { Free(); }
@@ -83,7 +98,8 @@ class PaddleBuf {
   bool memory_owned_{true};
 };
 
-// Basic input and output data structure for PaddlePredictor.
+/** Basic input and output data structure for PaddlePredictor.
+ */
 struct PaddleTensor {
   PaddleTensor() = default;
   std::string name;  // variable name.
@@ -94,19 +110,22 @@
 };
 
 enum class PaddlePlace { kUNK = -1, kCPU, kGPU };
 
-// Tensor without copy, currently only supports AnalysisPredictor.
+/** Tensor without copy, currently only supports AnalysisPredictor.
+ */
 class ZeroCopyTensor {
  public:
   void Reshape(const std::vector<int>& shape);
 
-  // Get the memory in CPU or GPU with specific data type, should Reshape first
-  // to tell the data size.
-  // Once can directly call this data to feed the data.
-  // This is for write the input tensor.
+  /** Get the memory in CPU or GPU with the specific data type; Reshape should
+   * be called first to tell the data size.
+   * One can directly write to this memory to feed the data.
+   * This is for writing the input tensor.
+   */
   template <typename T>
   T* mutable_data(PaddlePlace place);
-  // Get the memory directly, will return the place and memory size by pointer.
-  // This is for reading the output tensor.
+  /** Get the memory directly; the place and memory size are returned through
+   * the pointers. This is for reading the output tensor.
+   */
   template <typename T>
   T* data(PaddlePlace* place, int* size) const;
 
@@ -128,8 +147,7 @@ class ZeroCopyTensor {
   void* scope_{nullptr};
 };
 
-/*
- * A simple Inference API for Paddle.
+/** A simple Inference API for Paddle.
  */
 class PaddlePredictor {
  public:
@@ -138,18 +156,20 @@ class PaddlePredictor {
   PaddlePredictor(const PaddlePredictor&) = delete;
   PaddlePredictor& operator=(const PaddlePredictor&) = delete;
 
-  // Predict an record.
-  // The caller should be responsible for allocating and releasing the memory of
-  // `inputs`. `inputs` should be available until Run returns. Caller should be
-  // responsible for the output tensor's buffer, either allocated or passed from
-  // outside.
+  /** Predict a record.
+   * The caller should be responsible for allocating and releasing the memory of
+   * `inputs`. `inputs` should be available until Run returns. The caller should
+   * be responsible for the output tensor's buffer, either allocated or passed
+   * from outside.
+   */
   virtual bool Run(const std::vector<PaddleTensor>& inputs,
                    std::vector<PaddleTensor>* output_data,
                    int batch_size = -1) = 0;
 
-  // Zero copy input and output optimization.
-  // Get the input or output tensors, and operate on their memory directly,
-  // without copy.
+  /** Zero copy input and output optimization.
+   * Get the input or output tensors, and operate on their memory directly,
+   * without copy.
+   */
   virtual std::unique_ptr<ZeroCopyTensor> GetInputTensor(
       const std::string& name) {
     return nullptr;
   }
@@ -160,16 +180,19 @@ class PaddlePredictor {
   }
 
   virtual bool ZeroCopyRun() { return false; }
 
-  // Clone a predictor that share the model weights, the Cloned predictor should
-  // be thread-safe.
+  /** Clone a predictor that shares the model weights; the cloned predictor
+   * should be thread-safe.
+   */
   virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
 
-  // Destroy the Predictor.
+  /** Destroy the Predictor.
+   */
   virtual ~PaddlePredictor() = default;
 
-  // The common configs for all the predictors.
+  /** The common configs for all the predictors.
+   */
   struct Config {
-    std::string model_dir;  // path to the model directory.
+    std::string model_dir;  /*!< path to the model directory. */
   };
 };
 
@@ -177,17 +200,21 @@
 struct NativeConfig : public PaddlePredictor::Config {
   // GPU related fields.
   bool use_gpu{false};
   int device{0};
-  float fraction_of_gpu_memory{-1.f};  // Change to a float in (0,1] if needed.
+  float fraction_of_gpu_memory{
+      -1.f};  /*!< Change to a float in (0,1] if needed. */
 
   // Specify the exact path of program and parameter files.
   std::string prog_file;
   std::string param_file;
 
-  // Specify the variable's name of each input if input tensors don't follow the
-  // `feeds` and `fetches` of the phase `save_inference_model`.
+  /** Specify the variable name of each input if the input tensors don't follow
+   * the `feeds` and `fetches` of the phase `save_inference_model`.
+   */
   bool specify_input_name{false};
 
-  // Set and get the number of cpu math library threads.
+  /** Set the number of cpu math library threads.
+   */
   void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads) {
     cpu_math_library_num_threads_ = cpu_math_library_num_threads;
   }
@@ -201,28 +228,33 @@
   int cpu_math_library_num_threads_{1};
 };
 
-// A factory to help create different predictors.
-//
-// Usage:
-//
-// NativeConfig config;
-// ... // change the configs.
-// auto native_predictor = CreatePaddlePredictor(config);
-//
-// FOR EXTENSION DEVELOPER:
-// Different predictors are designated by config type. Similar configs can be
-// merged, but there shouldn't be a huge config containing different fields for
-// more than one kind of predictors.
+/*! \fn std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT&
+ * config);
+ *
+ * \brief A factory to help create different predictors.
+ *
+ * Usage:
+ *
+ *   NativeConfig config;
+ *   ... // change the configs.
+ *   auto native_predictor = CreatePaddlePredictor(config);
+ *
+ * FOR EXTENSION DEVELOPERS:
+ * Different predictors are designated by config type.
+ * Similar configs can be merged, but there shouldn't be a huge config
+ * containing different fields for more than one kind of predictors.
+ */
 template <typename ConfigT>
 std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
 
-// NOTE The following APIs are too trivial, we will discard it in the following
-// versions.
+/** NOTE The following APIs are too trivial; we will discard them in the
+ * following versions.
+ */
 enum class PaddleEngineKind {
-  kNative = 0,         // Use the native Fluid facility.
-  kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
-  kAnalysis,           // More optimization.
-  kAnakin              // Use Anakin for inference, not mature yet.
+  kNative = 0,         /*!< Use the native Fluid facility. */
+  kAutoMixedTensorRT,  /*!< Automatically mix Fluid with TensorRT. */
+  kAnalysis,           /*!< More optimization. */
+  kAnakin              /*!< Use Anakin for inference, not mature yet. */
 };
 
 template <typename ConfigT, PaddleEngineKind engine>
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h
index b4cbc40e0..9337ae55b 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.h
+++ b/paddle/fluid/inference/api/paddle_pass_builder.h
@@ -18,30 +18,39 @@
 #include 
 #include 
 
+/*! \file */
+
+/*! \namespace paddle */
 namespace paddle {
-
-/*
- * This is a pass builder based on string. It is part of inference API.
+
+/** This is a pass builder based on string. It is part of the inference API.
  */
 class PaddlePassBuilder {
  public:
   explicit PaddlePassBuilder(const std::vector<std::string> &passes)
       : passes_(passes) {}
 
+  /** Append a pass to the end of the passes. */
   void AppendPass(const std::string &pass_type);
 
+  /** Insert a pass at a specific position.
+   * @param idx the position to insert at.
+   * @param pass_type the pass key.
+   */
   void InsertPass(size_t idx, const std::string &pass_type);
 
-  // Delete the `idx`-th pass.
+  /** Delete the `idx`-th pass. */
   void DeletePass(size_t idx);
 
-  // Delete all the passes that has type `pass_type`.
+  /** Delete all the passes that have type `pass_type`. */
   void DeletePass(const std::string &pass_type);
 
-  // Visualize the computation graph after each pass by generating a DOT
-  // language file, one can draw them with the Graphviz toolkit.
+  /** Visualize the computation graph after each pass by generating a DOT
+   * language file; one can draw them with the Graphviz toolkit.
+   */
   void TurnOnDebug();
 
-  // Human-readible information.
+  /** Human-readable information. */
   std::string DebugString();
 
@@ -50,16 +59,16 @@
   const std::vector<std::string> &AllPasses() const { return passes_; }
 
 protected:
   std::vector<std::string> passes_;
 };
 
-/*
- * Pass strategy to help control the IR passes.
+/** Pass strategy to help control the IR passes.
  */
 class PassStrategy : public PaddlePassBuilder {
  public:
   explicit PassStrategy(const std::vector<std::string> &passes)
       : PaddlePassBuilder(passes) {}
 
-  // The MKLDNN control exists in both CPU and GPU mode, because there can be
-  // still some CPU kernels running in CPU mode.
+  /** The MKLDNN control exists in both CPU and GPU mode, because some CPU
+   * kernels may still run even when the GPU is used.
+   */
   virtual void EnableMKLDNN() = 0;
 
   bool use_gpu() const { return use_gpu_; }
 
@@ -70,8 +79,7 @@
   virtual ~PassStrategy() = default;
 
 protected:
   bool use_gpu_{false};
 };
 
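The builder above is what AnalysisConfig::pass_builder() returns, so a short sketch of tweaking passes looks roughly like the following. The pass name is only an example, and, as the config header notes, this hook is developer-only and may break.

#include "paddle/fluid/inference/api/paddle_analysis_config.h"

// Sketch only: adjust the IR passes through the developer-only builder.
void TunePasses() {
  paddle::contrib::AnalysisConfig config;
  config.SetModel("./model_dir");          // hypothetical model directory
  paddle::PassStrategy* passes = config.pass_builder();
  passes->DeletePass("fc_fuse_pass");      // example pass name
  passes->TurnOnDebug();                   // dump a DOT file after each pass
}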
-/*
- * The CPU passes controller, it is used in AnalysisPredictor with CPU mode.
+/** The CPU passes controller; it is used in AnalysisPredictor with CPU mode.
  */
 class CpuPassStrategy : public PassStrategy {
  public:
@@ -117,8 +125,7 @@
   CpuPassStrategy(const CpuPassStrategy &other) : PassStrategy(other.passes_) {}
 };
 
-/*
- * The GPU passes strategy, it is used in
+/** The GPU passes strategy; it is used in
  AnalysisPredictor with GPU mode.
  */
 class GpuPassStrategy : public PassStrategy {
  public:
--
GitLab
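As a closing illustration, hedged sketches of the two prediction paths this patch documents: the conventional Run() call with PaddleTensor and PaddleBuf, and the zero-copy path through ZeroCopyTensor. All paths, tensor names, and shapes below are assumptions for the example, and GetOutputTensor is the output-side counterpart of the GetInputTensor shown in PaddlePredictor.

#include <vector>
#include "paddle/fluid/inference/api/paddle_analysis_config.h"
#include "paddle/fluid/inference/api/paddle_api.h"

// Sketch only: the conventional Run() path, mirroring the Usage notes above.
void RunDemo() {
  paddle::NativeConfig config;
  config.model_dir = "./model_dir";  // hypothetical model directory
  config.use_gpu = false;
  auto predictor = paddle::CreatePaddlePredictor(config);

  std::vector<float> input(1 * 3 * 224 * 224, 0.f);  // assumed input shape
  paddle::PaddleTensor tensor;
  tensor.name = "x";  // assumed input variable name
  tensor.shape = {1, 3, 224, 224};
  tensor.dtype = paddle::PaddleDType::FLOAT32;
  // External buffer: the caller keeps ownership, as the PaddleBuf comment says.
  tensor.data = paddle::PaddleBuf(input.data(), input.size() * sizeof(float));

  std::vector<paddle::PaddleTensor> outputs;
  predictor->Run({tensor}, &outputs);
}

// Sketch only: the zero-copy path (AnalysisPredictor only).
void ZeroCopyDemo() {
  paddle::contrib::AnalysisConfig config;
  config.SetModel("./model_dir");       // hypothetical model directory
  config.SwitchUseFeedFetchOps(false);  // required before using ZeroCopyTensor
  auto predictor = paddle::CreatePaddlePredictor(config);

  auto input = predictor->GetInputTensor("x");  // assumed input name
  input->Reshape({1, 3, 224, 224});             // assumed input shape
  float* in_data = input->mutable_data<float>(paddle::PaddlePlace::kCPU);
  // ... fill in_data with 1 * 3 * 224 * 224 float values ...
  (void)in_data;

  predictor->ZeroCopyRun();

  auto output = predictor->GetOutputTensor("out");  // assumed output name
  paddle::PaddlePlace place;
  int size = 0;
  float* out_data = output->data<float>(&place, &size);
  (void)out_data;
}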