diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp
index f1ff957c6b89296ba2fa88a7485cb8af350e81ea..6201ce926f2899664e438c05fdcf31d22745762a 100644
--- a/paddle/api/Matrix.cpp
+++ b/paddle/api/Matrix.cpp
@@ -53,10 +53,14 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
 }
 
 Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
-                                     bool copy, bool useGpu) {
+                                     bool copy, bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// Gpu mode only supports copy=True
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
   } else {
     return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 0825260fa1e3650b3f3cebba24a3abcfc3a222f4..386de6d59741764eb510cd08f145a477bf134c97 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -43,7 +43,7 @@ using namespace paddle::enumeration_wrapper; // NOLINT
 void initPaddle(int argc, char** argv);
 
 /// Return FLAGS_use_gpu
-bool isUseGpu();
+bool isUsingGpu();
 
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();
@@ -105,7 +105,7 @@ public:
    * Create A Matrix with height,width, which is filled by zero.
    */
   static Matrix* createZero(size_t height, size_t width,
-                            bool useGpu = isUseGpu());
+                            bool useGpu = isUsingGpu());
 
   /**
    * Create Sparse Matrix.
@@ -118,7 +118,7 @@
    */
   static Matrix* createSparse(size_t height, size_t width, size_t nnz,
                               bool isNonVal = true, bool trans = false,
-                              bool useGpu = isUseGpu());
+                              bool useGpu = isUsingGpu());
 
   /**
    * Create Dense Matrix.
@@ -127,11 +127,12 @@
    * @note the value will be copy into a new matrix.
    */
   static Matrix* createDense(const std::vector<float>& data, size_t height,
-                             size_t width, bool useGpu = isUseGpu());
+                             size_t width, bool useGpu = isUsingGpu());
 
   static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
                                       bool copy = true,
-                                      bool useGpu = isUseGpu());
+                                      bool useGpu = isUsingGpu())
+                                      throw (UnsupportError) ;
 
   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -229,7 +230,7 @@ public:
   ~Vector();
 
   /// Create Vector filled with zero.
-  static Vector* createZero(size_t sz, bool useGpu = isUseGpu());
+  static Vector* createZero(size_t sz, bool useGpu = isUsingGpu());
 
   /**
    * Create Vector from list of float.
@@ -237,10 +238,11 @@ public:
    * It will create a new vector, and copy data into it.
    */
   static Vector* create(const std::vector<float>& data,
-                        bool useGpu = isUseGpu());
+                        bool useGpu = isUsingGpu());
 
   static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
-                                       bool useGpu = isUseGpu());
+                                       bool useGpu = isUsingGpu())
+                                       throw (UnsupportError) ;
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
   *
@@ -290,17 +292,18 @@ class IVector {
 public:
 
   /// Create IVector filled with zero
-  static IVector* createZero(size_t sz, bool useGpu = isUseGpu());
+  static IVector* createZero(size_t sz, bool useGpu = isUsingGpu());
 
   /**
    * Create IVector from list of int.
    * It will create a new vector, and copy data into it.
    */
   static IVector* create(const std::vector<int>& data,
-                         bool useGpu = isUseGpu());
+                         bool useGpu = isUsingGpu());
 
   static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
-                                        bool useGpu = isUseGpu());
+                                        bool useGpu = isUsingGpu())
+                                        throw (UnsupportError) ;
 
   /**
    * Create Cpu IVector from numpy array, which dtype=int32
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp
index f953b322ce83b6bd7ae075db29612756fe11ecfe..f72c06aad31ca4b9319a63556a0fead179e1dbbe 100644
--- a/paddle/api/Util.cpp
+++ b/paddle/api/Util.cpp
@@ -41,7 +41,7 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
   : valBuf(v), idxBuf(i), length(l), needFree(f) {}
 
-bool isUseGpu() {return FLAGS_use_gpu;}
+bool isUsingGpu() {return FLAGS_use_gpu;}
 
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index 5abafad9d1216650f13100182af7dfb77628cc0a..787cf1c973bab1a0daf9444aaa08cedf8b6fdf25 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -40,10 +40,14 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
 }
 
 IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
-                                        bool useGpu) {
+                                        bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// if use gpu only copy=true is supported
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return IVector::createGpuVectorFromNumpy(data, dim);
   } else {
     return IVector::createCpuVectorFromNumpy(data, dim, copy);
@@ -200,10 +204,14 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
 }
 
 Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
-                                      bool useGpu) {
+                                      bool useGpu)
+  throw (UnsupportError) {
   if (useGpu) {
     /// if use gpu only copy=True is supported
-    CHECK(copy);
+    if (!copy) {
+      UnsupportError e;
+      throw e;
+    }
     return Vector::createGpuVectorFromNumpy(data, dim);
   } else {
     return Vector::createCpuVectorFromNumpy(data, dim, copy);