API¶
Defines
- 
DISABLE_COPY_AND_ASSIGN(classname)¶
Enums
Functions
- 
void initPaddle(int argc, char **argv)¶
- Initialize Paddle. In Python, this method should be invoked as:
    import sys
    import paddle
    paddle.initPaddle(sys.argv)
  or with any other list of str in place of sys.argv. 
- 
bool isGpuVersion()¶
- Return true if this py_paddle build is compiled with GPU support. 
Variables
- 
const size_t NO_SPARSE_ID¶
- In Python, -1UL is awkward to write, so this constant is defined for use on the Python side. 
- 
class IOError¶
- #include <PaddleAPI.h> The error for I/O operations, such as a file not being found. 
- 
class RangeError¶
- #include <PaddleAPI.h> Out-of-range error. 
- 
class UnsupportError¶
- #include <PaddleAPI.h> Unsupported-operation error, such as accessing GPU memory directly. 
- 
struct FloatArray¶
- #include <PaddleAPI.h> This type maps to a Python list of float. Public Functions - 
FloatArray(const float *b, const size_t l)¶
 
- 
- 
struct IntArray¶
- #include <PaddleAPI.h> This type maps to a Python list of int. Public Functions - 
IntArray(const int *b, const size_t l, bool f = false)¶
 
- 
- 
struct IntWithFloatArray¶
- #include <PaddleAPI.h> This type maps to a Python list of (int, float) pairs. Public Functions - 
IntWithFloatArray(const float *v, const int *i, size_t l, bool f = false)¶
 
- 
- 
class Matrix¶
- Public Functions - 
virtual ~Matrix()¶
 - 
void toNumpyMatInplace(float **view_data, int *dim1, int *dim2)¶
- Cast to a numpy matrix. - Example:
    import paddle
    m = paddle.Matrix.createZero(10, 2)
    numpy_mat = m.toNumpyMat()
- Note
- In Python this method takes no parameters and returns a numpy matrix rather than void. - Only CpuDenseMatrix is supported. 
 
 - 
void copyToNumpyMat(float **view_m_data, int *dim1, int *dim2)¶
- Copy to a numpy matrix. 
 - 
void copyFromNumpyMat(float *data, int dim1, int dim2)¶
- Copy from a numpy matrix. A round-trip sketch follows.
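  A minimal round-trip sketch between numpy and a CPU dense Matrix. It assumes the SWIG typemaps expose copyFromNumpyMat as taking a float32 numpy matrix and copyToNumpyMat as taking no arguments and returning a numpy matrix (mirroring the toNumpyMat note above); the exact Python signatures may differ.
    import numpy as np
    import paddle

    m = paddle.Matrix.createZero(2, 3)                  # CPU dense matrix filled with zeros
    src = np.arange(6, dtype=np.float32).reshape(2, 3)
    m.copyFromNumpyMat(src)                             # copy numpy data into the Matrix
    out = m.copyToNumpyMat()                            # copy Matrix data back out as a numpy matrix
    print(out.shape)                                    # (2, 3)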
 - 
bool isSparse() const¶
- Return true if this matrix is sparse. 
 - 
SparseValueType getSparseValueType() const¶
 - 
SparseFormatType getSparseFormat() const¶
 - 
IntWithFloatArray getSparseRowColsVal(size_t i) const¶
 - 
size_t getHeight() const¶
 - 
size_t getWidth() const¶
 - 
float get(size_t x, size_t y) const¶
 - 
void set(size_t x, size_t y, float val)¶
 - 
FloatArray getData() const¶
- The return type maps to a Python list of float. 
 - 
void sparseCopyFrom(const std::vector<int> &rows, const std::vector<int> &cols, const std::vector<float> &values = std::vector< float >())¶
- Copy from rows, cols, and values. - If the matrix is sparse non-value, values should be []; see the sketch under createSparse below. 
 - 
bool isGpu() const¶
 - Public Static Functions - 
Matrix *createZero(size_t height, size_t width, bool useGpu = false)¶
- Create a matrix of the given height and width, filled with zeros. 
 - 
Matrix *createSparse(size_t height, size_t width, size_t nnz, bool isNonVal = true, bool trans = false, bool useGpu = false)¶
- Create a sparse matrix. - After creation, sparseCopyFrom can be used to fill the matrix, as in the sketch below. - Note
- The default sparse format is SPARSE_CSR.
- Parameters
- nnz -- Number of non-zero values. 
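  A minimal sketch of creating and filling a sparse non-value matrix from Python. It assumes the factory is exposed as paddle.Matrix.createSparse with the defaults shown above and that sparseCopyFrom accepts plain Python lists for rows, cols, and values; these Python-side details are assumptions.
    import paddle

    # 3x4 sparse non-value matrix with 3 non-zero entries, SPARSE_CSR format by default
    m = paddle.Matrix.createSparse(3, 4, 3)
    rows = [0, 1, 2]                   # non-zero entry positions (interpretation follows the C++ side)
    cols = [1, 0, 3]
    m.sparseCopyFrom(rows, cols, [])   # values must be [] for a non-value sparse matrix
    print(m.isSparse())                # True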
 
 
 - 
Matrix *createDense(const std::vector<float> &data, size_t height, size_t width, bool useGpu = false)¶
- Create a dense matrix. - Note
- The values are copied into a new matrix.
- Parameters
- data -- In Python, a list of float should be passed. 
 
 
 - 
Matrix *createCpuDenseFromNumpy(float *data, int dim1, int dim2, bool copy = false)¶
- Create a CPU dense matrix from a numpy matrix with dtype=float32. - Parameters
- data -- a numpy matrix.
- dim1 -- first dimension of data.
- dim2 -- second dimension of data.
- copy -- if true, copy into a new matrix; if false, create the matrix in place over the numpy data. 
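  A minimal sketch contrasting the two dense factories from Python. It assumes createDense takes a flat list of float plus height and width, and that the numpy typemap lets createCpuDenseFromNumpy be called with just the float32 array and the copy flag; these Python-side signatures are assumptions.
    import numpy as np
    import paddle

    # from a flat Python list of float, shaped to 2 x 3
    a = paddle.Matrix.createDense([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], 2, 3)

    # from a float32 numpy matrix; True copies into a new matrix, False wraps it in place
    np_mat = np.ones((2, 3), dtype=np.float32)
    b = paddle.Matrix.createCpuDenseFromNumpy(np_mat, True)
    print(a.getHeight(), a.getWidth(), b.get(0, 0))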
 
 
 - Private Members - 
MatrixPrivate *m¶
 - Friends - 
friend Trainer
 - 
friend GradientMachine
 - 
friend Arguments
 
- 
- 
class Vector¶
- Public Functions - 
~Vector()¶
 - 
void toNumpyArrayInplace(float **view_data, int *dim1)¶
- Cast to numpy array inplace. 
 - 
void copyToNumpyArray(float **view_m_data, int *dim1)¶
- Copy to numpy array. 
 - 
void copyFromNumpyArray(float *data, int dim)¶
- Copy from numpy array. 
 - 
float get(const size_t idx) const¶
- Maps to __getitem__ in Python. 
 - 
void set(const size_t idx, float val)¶
- Maps to __setitem__ in Python. 
 - 
bool isGpu() const¶
- Return whether this is a GPU vector. 
 - 
size_t getSize() const¶
- Maps to len() in Python. 
 - Public Static Functions - 
Vector *create(const std::vector<float> &data, bool useGpu = false)¶
- Create a Vector from a list of float. - A new vector is created and the data is copied into it, as in the sketch below. 
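  A minimal sketch of creating and accessing a Vector from Python, using only the methods in this listing (the paddle.Vector spelling of the static factory is an assumption).
    import paddle

    v = paddle.Vector.create([0.5, 1.5, 2.5])    # new CPU vector; the data is copied in
    print(v.getSize())                           # 3, maps to len() in Python
    v.set(0, 9.0)
    print(v.get(0))                              # 9.0
    print(v.isGpu())                             # False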
 - Private Members - 
VectorPrivate *m¶
 - Friends - 
friend Parameter
 - 
friend ParameterOptimizer
 - 
friend ParameterTraverseCallbackPrivate
 
- 
- 
class IVector¶
- Public Functions - 
void toNumpyArrayInplace(int **view_data, int *dim1)¶
- Cast to numpy array inplace. 
 - 
void copyToNumpyArray(int **view_m_data, int *dim1)¶
- Copy to numpy array. 
 - 
void copyFromNumpyArray(int *data, int dim)¶
- Copy from numpy array. 
 - 
virtual ~IVector()¶
 - 
int &operator[](const size_t idx)¶
- This method maps to Python's [] operator. 
 - 
const int &operator[](const size_t idx) const¶
 - 
int get(const size_t idx) const¶
 - 
void set(const size_t idx, int val)¶
 - 
bool isGpu() const¶
- Return true if this is a GPU vector. 
 - 
size_t getSize() const¶
- This method maps to Python's len(). 
 - Public Static Functions - 
IVector *create(const std::vector<int> &data, bool useGpu = false)¶
- Create IVector from list of int. It will create a new vector, and copy data into it. 
 - Private Members - 
IVectorPrivate *m¶
 - Friends - 
friend Arguments
 
- 
- 
class Arguments¶
- #include <PaddleAPI.h> Arguments is actually a std::vector<paddle::Argument> in Paddle. Public Functions - 
void resize(size_t slotNum)¶
 - 
virtual ~Arguments()¶
 - 
size_t getSlotNum() const¶
- Return the number of slots that the arguments contain. - It is actually the vector's size. 
 - 
int64_t getBatchSize(size_t idx = 0) const¶
 Public Static Functions
 Private Members - 
ArgumentsPrivate *m¶
 Friends - 
friend Trainer
 - 
friend GradientMachine
 - 
friend SequenceGenerator
 
- 
- 
class ParameterConfig¶
- 
Private Members - 
ParameterConfigPrivate *m¶
 Private Static Functions - Internal methods 
 - 
ParameterConfig *createParameterConfigFromParameterPtr(void *ptr)¶
 Friends - 
friend Parameter
 - 
friend ParameterOptimizer
 - 
friend ParameterTraverseCallbackPrivate
 
- 
- 
class OptimizationConfig¶
- 
Public Static Functions - 
OptimizationConfig *createFromProtoString(const std::string &str)¶
 Private Functions - 
DISABLE_COPY_AND_ASSIGN(OptimizationConfig)¶
 - 
OptimizationConfig()¶
 - 
void *getRawPtr()¶
 Private Members - 
OptimizationConfigPrivate *m¶
 Friends - 
friend TrainerConfig
 - 
friend ParameterOptimizer
 
- 
- 
class Parameter¶
- Public Functions - 
virtual ~Parameter()¶
 - 
std::string getName() const¶
- Get the parameter name. 
 - 
size_t getID() const¶
- Get the parameter id. 
 - 
ParameterConfig *getConfig()¶
 - Private Members - 
ParameterPrivate *m¶
 - Friends - 
friend UpdateCallbackWrapper
 - 
friend GradientMachine
 
- 
- 
class ModelConfig¶
- #include <PaddleAPI.h> A model config can only be obtained from a TrainerConfig. It is used by GradientMachine. Public Functions - 
virtual ~ModelConfig()¶
 Private Functions - 
ModelConfig()¶
 - 
DISABLE_COPY_AND_ASSIGN(ModelConfig)¶
 - 
void *getPaddleModelConfig() const¶
 Private Members - 
ModelConfigPrivate *m¶
 Friends - 
friend TrainerConfig
 - 
friend TrainerConfigPrivate
 - 
friend GradientMachine
 
- 
- 
class TrainerConfig¶
- #include <PaddleAPI.h> Loads a TrainerConfig from a config file. It is used by GradientMachine. Public Functions - 
virtual ~TrainerConfig()¶
 - 
ModelConfig *getModelConfig() const¶
 - 
OptimizationConfig *getOptimizationConfig() const¶
 Public Static Functions - 
TrainerConfig *createFromTrainerConfigFile(const std::string &configPath)¶
 Private Members - 
TrainerConfigPrivate *m¶
 
- 
- 
class UpdateCallback¶
- #include <PaddleAPI.h> The callback used in the backward stage. You can inherit this class in Python:
    class UpdateCallbackInPython(paddle.UpdateCallback):
        def __init__(self):
            paddle.UpdateCallback.__init__(self)

        def apply(self, param):
            assert isinstance(param, paddle.Parameter)
- 
class ParameterTraverseCallback¶
- Public Functions - 
~ParameterTraverseCallback()¶
 - 
void apply(const std::vector<Vector *> &vecs, const ParameterConfig &config, size_t sparseId)¶
 - Private Members - 
ParameterTraverseCallbackPrivate *m¶
 - Friends - 
friend ParameterOptimizer
 
- 
- 
class ParameterOptimizer¶
- Public Functions - 
~ParameterOptimizer()¶
 - 
void init(size_t numRows, const ParameterConfig *config)¶
 - 
void startPass()¶
 - 
void finishPass()¶
 - 
void startBatch(size_t numSamplesProcessed)¶
 - 
void finishBatch()¶
 - 
void update(const std::vector<Vector *> &vecs, const ParameterConfig &conf, size_t sparseId = NO_SPARSE_ID)¶
 - 
std::vector<int> getParameterTypes() const¶
 - 
ParameterTraverseCallback *needSpecialTraversal(const ParameterConfig &config) const¶
 - Public Static Functions - 
ParameterOptimizer *create(OptimizationConfig *config)¶
 - Private Members - 
ParameterOptimizerPrivate *m¶
 
- 
- 
class GradientMachine¶
- Public Functions - 
virtual ~GradientMachine()¶
 - 
void forward(const Arguments &inArgs, Arguments *outArgs, PassType passType)¶
- The forward stage of GradientMachine. - Note
- The outArgs may be zero-length arguments. - This method is very useful for prediction from a trained model; see the sketch below. 
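  A minimal forward-only prediction sketch. It is written as a function so the construction of the Arguments objects (not shown in this listing) stays outside; the paddle.PASS_TEST enum spelling is an assumption about the SWIG-level Python API.
    import paddle

    def predict(machine, in_args, out_args):
        """Forward stage only, as used for prediction from a trained model.

        machine:  a paddle.GradientMachine with parameters already loaded
        in_args:  paddle.Arguments holding one input batch
        out_args: paddle.Arguments for the outputs; may start out zero-length
        """
        machine.forward(in_args, out_args, paddle.PASS_TEST)  # PASS_TEST: assumed enum name
        return out_args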
 
 - 
void backward(const UpdateCallback &callback = UpdateCallback ())¶
- The backward stage of GradientMachine. - Note
- Currently the ParameterUpdater is not wrapped in SWIG, so backward cannot actually train a network. But you can write an update callback to change the parameters, or implement a ParameterUpdater on the Python side, as sketched below.
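  A minimal sketch of a Python update callback passed to backward, following the UpdateCallback subclassing example above; it only inspects each Parameter, since real parameter updates would need accessors beyond this listing.
    import paddle

    class InspectCallback(paddle.UpdateCallback):
        """Invoked once per parameter during the backward stage."""
        def __init__(self):
            paddle.UpdateCallback.__init__(self)

        def apply(self, param):
            assert isinstance(param, paddle.Parameter)
            print(param.getName(), param.getID())   # real training would update the parameter here

    # usage, after machine.forward(...):
    #     machine.backward(InspectCallback())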
 
 - 
void forwardBackward(const Arguments &inArgs, Arguments *outArgs, PassType passType, const UpdateCallback &callback = UpdateCallback ())¶
- Combine forward/backward 
 - 
void loadParameters(const std::string &path)¶
 - 
size_t getParameterSize() const¶
 - 
void randParameters()¶
 - 
SequenceGenerator *asSequenceGenerator(const std::vector<std::string> &dict = std::vector< std::string >(), size_t begin_id = 0UL, size_t end_id = 0UL, size_t max_length = 100UL, size_t beam_size = -1UL)¶
- Create a sequence generator. - Note
- It works just like paddle_gen_sequence.
 
 - Public Static Functions - 
GradientMachine *createByConfigProtoStr(const std::string &protoStr, GradientMatchineCreateMode mode = CREATE_MODE_NORMAL, const std::vector<int> ¶meterTypes = defaultParamTypes)¶
- Create from a protobuf string. - The proto string can be generated by Python protobuf code. 
 - 
GradientMachine *createByModelConfig(ModelConfig *conf, GradientMatchineCreateMode mode = CREATE_MODE_NORMAL, const std::vector<int> ¶meterTypes = defaultParamTypes)¶
- Create from a ModelConfig object. - To get a ModelConfig, load a TrainerConfig from a config file and then call its getModelConfig(), as in the sketch below. 
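  A minimal sketch of building a GradientMachine from a trainer config file, using only createFromTrainerConfigFile, getModelConfig, and the methods in this listing; the config path is a placeholder.
    import paddle

    conf = paddle.TrainerConfig.createFromTrainerConfigFile("trainer_config.conf")  # placeholder path
    machine = paddle.GradientMachine.createByModelConfig(conf.getModelConfig())
    machine.randParameters()            # or machine.loadParameters(path) for a trained model
    print(machine.getParameterSize())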
 - Private Members - 
GradientMachinePrivate *m¶
 - Private Static Functions - 
GradientMachine *createFromPaddleModelPtr(void *confPtr, GradientMatchineCreateMode mode, const std::vector<int> &types)¶
 - Private Static Attributes - 
std::vector<int> defaultParamTypes¶
 
- 
- 
class Trainer¶
- Public Functions - 
virtual ~Trainer()¶
 - 
void startTrain()¶
- Start training. 
 - 
void finishTrain()¶
 - 
void startTrainPass()¶
- Start a training pass. 
 - 
void finishTrainPass()¶
 - 
void setBatchSize(size_t batchSize)¶
 - 
bool trainOneBatch(size_t batchSize = -1UL)¶
- Train one batch; see the training-loop sketch below. - Return
- true if all batches are finished.
- Parameters
- batchSize -- if -1, use the batch size from the command line or the one set before; otherwise use this batchSize for training. 
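  A minimal training-loop sketch using only the Trainer methods in this listing; it assumes Paddle has been initialized from the command line (which supplies the trainer config), and the number of passes is an arbitrary example value.
    import sys
    import paddle

    paddle.initPaddle(sys.argv)                   # trainer config comes from the command line
    trainer = paddle.Trainer.createByCommandLine()

    trainer.startTrain()
    for _ in range(10):                           # example: 10 passes
        trainer.startTrainPass()
        while not trainer.trainOneBatch():        # returns True once all batches are finished
            pass
        trainer.finishTrainPass()
    trainer.finishTrain()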
 
 
 - 
bool prepareBatchData(size_t batchSize = -1UL)¶
 - 
void finishTrainOneBatch()¶
 - 
void forwardOneBatch()¶
 - Public Static Functions - 
Trainer *createByCommandLine()¶
- Create a Trainer from a TrainerConfig given on the Paddle command line. 
 - Private Members - 
TrainerPrivate *m¶
 
- 
- 
class ISequenceResults¶
- #include <PaddleAPI.h> The N-best results generated from one input sequence. Subclassed by PathSequenceResults. Public Functions - 
virtual ~ISequenceResults()¶
 - 
virtual size_t getSize() const = 0¶
- Number of results. 
 - 
virtual std::string getSentence(size_t id, bool split = false) const = 0¶
- Get a sentence from the dictionary. - Parameters
- id -- the index of the result.
- split -- if true, the returned sentence is split into words separated by ' '. Default is false. 
 
 
 - 
virtual std::vector<int> getSequence(size_t id) const = 0¶
 - 
virtual float getScore(size_t id) const = 0¶
 
- 
- 
class SequenceGenerator¶
- Public Functions - 
virtual ~SequenceGenerator()¶
 - 
ISequenceResults *generateSequence(const Arguments &inArgs) const¶
- Generate a sequence from the input; see the sketch below. - Note
- The inArgs holds just one sequence of data. - The return value is an N-best generation result for inArgs, sorted by score. 
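  A minimal sketch of iterating over the N-best results, assuming the generator was obtained via GradientMachine.asSequenceGenerator above and that in_args is an Arguments object holding a single input sequence.
    def print_nbest(generator, in_args):
        """Print each generated candidate with its score, best first."""
        results = generator.generateSequence(in_args)    # ISequenceResults, sorted by score
        for i in range(results.getSize()):
            # split=True returns the sentence as space-separated words
            print(results.getScore(i), results.getSentence(i, True))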
 
 - 
void setDict(const std::vector<std::string> &dict)¶
 - 
void setBos(size_t bos)¶
 - 
void setEos(size_t eos)¶
 - 
void setMaxLength(size_t maxlength)¶
 - 
void setBeamSize(size_t beamSize)¶
 - Private Members - 
SequenceGeneratorPrivate *m¶
 - Private Static Functions
 - Friends - 
friend GradientMachine
 
- 
Functions
- template <typename T1, typename T2>
- 
void staticCastVector(std::vector<T2> *dest, const std::vector<T1> &src)¶