From f22573bdafa4554482fa51459e2763b12bea3190 Mon Sep 17 00:00:00 2001
From: wangyang59
Date: Tue, 8 Nov 2016 10:21:55 -0800
Subject: [PATCH] changed to isUsingGpu() in PaddleAPI.h and throw exceptions
 instead of CHECK

---
 paddle/api/Matrix.cpp  |  8 ++++++--
 paddle/api/PaddleAPI.h | 25 ++++++++++++++-----------
 paddle/api/Util.cpp    |  2 +-
 paddle/api/Vector.cpp  | 16 ++++++++++++----
 4 files changed, 33 insertions(+), 18 deletions(-)

diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp
index f1ff957c6b..6201ce926f 100644
--- a/paddle/api/Matrix.cpp
+++ b/paddle/api/Matrix.cpp
@@ -53,10 +53,14 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
 }

 Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
-                                     bool copy, bool useGpu) {
+                                     bool copy, bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// Gpu mode only supports copy=True
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
   } else {
     return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 0825260fa1..386de6d597 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -43,7 +43,7 @@ using namespace paddle::enumeration_wrapper; // NOLINT
 void initPaddle(int argc, char** argv);

 /// Return FLAGS_use_gpu
-bool isUseGpu();
+bool isUsingGpu();

 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();
@@ -105,7 +105,7 @@ public:
    * Create A Matrix with height,width, which is filled by zero.
    */
   static Matrix* createZero(size_t height, size_t width,
-                            bool useGpu = isUseGpu());
+                            bool useGpu = isUsingGpu());

   /**
    * Create Sparse Matrix.
@@ -118,7 +118,7 @@
    */
   static Matrix* createSparse(size_t height, size_t width, size_t nnz,
                               bool isNonVal = true, bool trans = false,
-                              bool useGpu = isUseGpu());
+                              bool useGpu = isUsingGpu());

   /**
    * Create Dense Matrix.
@@ -127,11 +127,12 @@
    * @note the value will be copy into a new matrix.
    */
   static Matrix* createDense(const std::vector<float>& data, size_t height,
-                             size_t width, bool useGpu = isUseGpu());
+                             size_t width, bool useGpu = isUsingGpu());

   static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
                                       bool copy = true,
-                                      bool useGpu = isUseGpu());
+                                      bool useGpu = isUsingGpu())
+  throw (UnsupportError) ;

   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -229,7 +230,7 @@ public:
   ~Vector();

   /// Create Vector filled with zero.
-  static Vector* createZero(size_t sz, bool useGpu = isUseGpu());
+  static Vector* createZero(size_t sz, bool useGpu = isUsingGpu());

   /**
    * Create Vector from list of float.
@@ -237,10 +238,11 @@ public:
    * It will create a new vector, and copy data into it.
    */
   static Vector* create(const std::vector<float>& data,
-                        bool useGpu = isUseGpu());
+                        bool useGpu = isUsingGpu());

   static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
-                                       bool useGpu = isUseGpu());
+                                       bool useGpu = isUsingGpu())
+  throw (UnsupportError) ;
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *
@@ -290,17 +292,18 @@ class IVector {

 public:
   /// Create IVector filled with zero
-  static IVector* createZero(size_t sz, bool useGpu = isUseGpu());
+  static IVector* createZero(size_t sz, bool useGpu = isUsingGpu());

   /**
    * Create IVector from list of int.
    * It will create a new vector, and copy data into it.
    */
   static IVector* create(const std::vector<int>& data,
-                         bool useGpu = isUseGpu());
+                         bool useGpu = isUsingGpu());

   static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
-                                        bool useGpu = isUseGpu());
+                                        bool useGpu = isUsingGpu())
+  throw (UnsupportError) ;

   /**
    * Create Cpu IVector from numpy array, which dtype=int32
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp
index f953b322ce..f72c06aad3 100644
--- a/paddle/api/Util.cpp
+++ b/paddle/api/Util.cpp
@@ -41,7 +41,7 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}

-bool isUseGpu() {return FLAGS_use_gpu;}
+bool isUsingGpu() {return FLAGS_use_gpu;}

 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index 5abafad9d1..787cf1c973 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -40,10 +40,14 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
 }

 IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
-                                        bool useGpu) {
+                                        bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// if use gpu only copy=true is supported
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return IVector::createGpuVectorFromNumpy(data, dim);
   } else {
     return IVector::createCpuVectorFromNumpy(data, dim, copy);
@@ -200,10 +204,14 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
 }

 Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
-                                      bool useGpu) {
+                                      bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// if use gpu only copy=True is supported
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return Vector::createGpuVectorFromNumpy(data, dim);
   } else {
     return Vector::createCpuVectorFromNumpy(data, dim, copy);
--
GitLab
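
Note: what follows is an illustrative, untested sketch and not part of the patch. It shows how caller code might exercise Matrix::createDenseFromNumpy() after this change, where requesting useGpu=true together with copy=false now raises UnsupportError instead of tripping a fatal CHECK(). The include path, the main() scaffolding, and the surrounding setup are assumptions made for the example, not the patch author's code.

    // Illustrative sketch only -- not part of the patch.
    #include "paddle/api/PaddleAPI.h"

    #include <iostream>

    int main() {
      float buf[6] = {0, 1, 2, 3, 4, 5};
      try {
        // Unsupported combination after this patch: a GPU dense matrix can
        // only be created from numpy data with copy=true, so this call throws.
        Matrix* m = Matrix::createDenseFromNumpy(buf, 2, 3,
                                                 /*copy=*/false,
                                                 /*useGpu=*/true);
        (void)m;
      } catch (const UnsupportError&) {
        std::cerr << "GPU dense matrix from numpy requires copy=true" << std::endl;
      }
      return 0;
    }

Replacing CHECK(copy) with a thrown UnsupportError means an unsupported argument combination no longer aborts the whole process; presumably the SWIG-generated py_paddle bindings can then surface it to Python as a catchable error.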