From 2be3a74779935777924d4bb061f98de4ea8272a1 Mon Sep 17 00:00:00 2001
From: wangyang59
Date: Mon, 7 Nov 2016 15:06:56 -0800
Subject: [PATCH] Modified API to use FLAGS_use_gpu as useGpu default value

---
 paddle/api/Matrix.cpp  | 11 +++++++++++
 paddle/api/Paddle.swig |  9 ++++++++-
 paddle/api/PaddleAPI.h | 31 +++++++++++++++++++++++--------
 paddle/api/Util.cpp    |  2 ++
 paddle/api/Vector.cpp  | 24 +++++++++++++++++++++++-
 5 files changed, 67 insertions(+), 10 deletions(-)

diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp
index 6a79f83495..f1ff957c6b 100644
--- a/paddle/api/Matrix.cpp
+++ b/paddle/api/Matrix.cpp
@@ -52,6 +52,17 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
   return m;
 }

+Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
+                                     bool copy, bool useGpu) {
+  if (useGpu) {
+    /// Gpu mode only supports copy=True
+    CHECK(copy);
+    return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
+  } else {
+    return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
+  }
+}
+
 Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
                                         bool copy) {
   auto m = new Matrix();
diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig
index a09f24ce1c..eaee182b52 100644
--- a/paddle/api/Paddle.swig
+++ b/paddle/api/Paddle.swig
@@ -133,14 +133,21 @@ namespace std {
 %newobject Matrix::createZero;
 %newobject Matrix::createSparse;
 %newobject Matrix::createDense;
+%newobject Matrix::createDenseFromNumpy;
+%newobject Matrix::createCpuDenseFromNumpy;
+%newobject Matrix::createGpuDenseFromNumpy;
 %newobject Vector::createZero;
 %newobject Vector::create;
+%newobject Vector::createVectorFromNumpy;
 %newobject Vector::createCpuVectorFromNumpy;
 %newobject Vector::createGpuVectorFromNumpy;
 %newobject IVector::createZero;
 %newobject IVector::create;
+%newobject IVector::createVectorFromNumpy;
+%newobject IVector::createCpuVectorFromNumpy;
+%newobject IVector::createGpuVectorFromNumpy;
 %newobject Trainer::createByCommandLine;
-%newobject Trainer::getNetworkOutput;
+%newobject Trainer::getForwardOutput;
 %newobject Trainer::getLayerOutput;
 %newobject Arguments::getSlotValue;
 %newobject Arguments::getSlotIds;
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index cf790f2f8e..0825260fa1 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -42,6 +42,9 @@ using namespace paddle::enumeration_wrapper;  // NOLINT
  */
 void initPaddle(int argc, char** argv);

+/// Return FLAGS_use_gpu
+bool isUseGpu();
+
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();

@@ -101,7 +104,8 @@ public:
   /**
    * Create A Matrix with height,width, which is filled by zero.
    */
-  static Matrix* createZero(size_t height, size_t width, bool useGpu = false);
+  static Matrix* createZero(size_t height, size_t width,
+                            bool useGpu = isUseGpu());

   /**
    * Create Sparse Matrix.
@@ -114,7 +118,7 @@
    */
   static Matrix* createSparse(size_t height, size_t width, size_t nnz,
                               bool isNonVal = true, bool trans = false,
-                              bool useGpu = false);
+                              bool useGpu = isUseGpu());

   /**
    * Create Dense Matrix.
@@ -123,7 +127,11 @@
    * @note the value will be copy into a new matrix.
    */
   static Matrix* createDense(const std::vector<float>& data, size_t height,
-                             size_t width, bool useGpu = false);
+                             size_t width, bool useGpu = isUseGpu());
+
+  static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
+                                      bool copy = true,
+                                      bool useGpu = isUseGpu());

   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -221,15 +229,18 @@ public:
   ~Vector();

   /// Create Vector filled with zero.
-  static Vector* createZero(size_t sz, bool useGpu = false);
+  static Vector* createZero(size_t sz, bool useGpu = isUseGpu());

   /**
    * Create Vector from list of float.
    *
    * It will create a new vector, and copy data into it.
    */
-  static Vector* create(const std::vector<float>& data, bool useGpu = false);
+  static Vector* create(const std::vector<float>& data,
+                        bool useGpu = isUseGpu());

+  static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
+                                       bool useGpu = isUseGpu());
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *
@@ -279,13 +290,17 @@ class IVector {

 public:
   /// Create IVector filled with zero
-  static IVector* createZero(size_t sz, bool useGpu = false);
+  static IVector* createZero(size_t sz, bool useGpu = isUseGpu());

   /**
    * Create IVector from list of int.
    * It will create a new vector, and copy data into it.
    */
-  static IVector* create(const std::vector<int>& data, bool useGpu = false);
+  static IVector* create(const std::vector<int>& data,
+                         bool useGpu = isUseGpu());
+
+  static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
+                                        bool useGpu = isUseGpu());

   /**
    * Create Cpu IVector from numpy array, which dtype=int32
@@ -297,7 +312,7 @@ public:
   /**
    * Create Gpu IVector from numpy array, which dtype=int32
    */
-  static IVector* createGpuVectorFromNumy(int* data, int dim);
+  static IVector* createGpuVectorFromNumpy(int* data, int dim);

   /// Cast to numpy array inplace.
   void toNumpyArrayInplace(int** view_data, int* dim1) throw(UnsupportError);
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp
index 8a6741078f..f953b322ce 100644
--- a/paddle/api/Util.cpp
+++ b/paddle/api/Util.cpp
@@ -41,6 +41,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}

+bool isUseGpu() {return FLAGS_use_gpu;}
+
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
   return false;
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index 1affc1a5fe..5abafad9d1 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -39,6 +39,17 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
   return v;
 }

+IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
+                                        bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=true is supported
+    CHECK(copy);
+    return IVector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return IVector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   auto v = new IVector();
   if (copy) {
@@ -50,7 +61,7 @@ IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   return v;
 }

-IVector* IVector::createGpuVectorFromNumy(int* data, int dim) {
+IVector* IVector::createGpuVectorFromNumpy(int* data, int dim) {
   auto v = new IVector();
   v->m->vec = paddle::IVector::create(dim, true);
   v->m->vec->copyFrom(data, dim);
@@ -188,6 +199,17 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
   }
 }

+Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
+                                      bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=True is supported
+    CHECK(copy);
+    return Vector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return Vector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
--
GitLab
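
For illustration only (not part of the patch): a minimal C++ sketch of how the new defaults are intended to behave, assuming a program built against the py_paddle C++ API declared in PaddleAPI.h. The main() harness, sample data, and cleanup below are assumptions; only initPaddle, isUseGpu, and Matrix::createDenseFromNumpy come from the API shown in the diff.

// Sketch under the assumptions above: after initPaddle() parses --use_gpu,
// factory calls that omit useGpu follow FLAGS_use_gpu via isUseGpu(), while
// an explicit argument still overrides the flag.
#include "PaddleAPI.h"

int main(int argc, char** argv) {
  // Parses gflags, including --use_gpu, before any call that relies on the
  // new isUseGpu() default. Run e.g. with --use_gpu=0 or --use_gpu=1.
  initPaddle(argc, argv);

  float data[6] = {1, 2, 3, 4, 5, 6};

  // No useGpu argument: placement now follows --use_gpu instead of always CPU.
  Matrix* m = Matrix::createDenseFromNumpy(data, 2, 3);

  // Explicit useGpu=false forces a CPU matrix regardless of the flag;
  // copy=false is only legal on the CPU path (the GPU path CHECKs copy).
  Matrix* cpuView = Matrix::createDenseFromNumpy(data, 2, 3,
                                                 /*copy=*/false,
                                                 /*useGpu=*/false);

  // The factories hand ownership to the caller (cf. %newobject in the .swig).
  delete m;
  delete cpuView;
  return 0;
}

The same pattern applies to the new Vector::createVectorFromNumpy and IVector::createVectorFromNumpy overloads: omitting useGpu defers to FLAGS_use_gpu, and the GPU branch requires copy=true.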