From b207535198d218e49508752cdc8ae9d4221d78d4 Mon Sep 17 00:00:00 2001
From: wangyang59
Date: Fri, 11 Nov 2016 11:21:20 -0800
Subject: [PATCH] Add setUseGpu in PaddleAPI.h and handle UnsupportedError in swig with meaningful message displayed

---
 paddle/api/Matrix.cpp         |  6 ++----
 paddle/api/Paddle.swig        | 14 ++++++++++++++
 paddle/api/PaddleAPI.h        | 19 ++++++++++++-------
 paddle/api/Util.cpp           |  2 ++
 paddle/api/Vector.cpp         | 12 ++++--------
 paddle/api/test/testMatrix.py |  8 ++++++--
 paddle/api/test/testVector.py | 10 ++++++----
 7 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/paddle/api/Matrix.cpp b/paddle/api/Matrix.cpp
index 6201ce926f..0c8d2935a0 100644
--- a/paddle/api/Matrix.cpp
+++ b/paddle/api/Matrix.cpp
@@ -53,13 +53,11 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
 }
 
 Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
-                                     bool copy, bool useGpu)
-  throw (UnsupportError) {
+                                     bool copy, bool useGpu) {
   if (useGpu) {
     /// Gpu mode only supports copy=True
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
   } else {
diff --git a/paddle/api/Paddle.swig b/paddle/api/Paddle.swig
index eaee182b52..e723a669f3 100644
--- a/paddle/api/Paddle.swig
+++ b/paddle/api/Paddle.swig
@@ -4,6 +4,20 @@
 #define SWIG_FILE_WITH_INIT
 #include "api/PaddleAPI.h"
 %}
+
+%include "exception.i"
+%exception{
+  try{
+    $action
+  }
+  catch(UnsupportError &ex ){
+    SWIG_exception(SWIG_RuntimeError, ex.what());
+  }
+  catch( ... ){
+    SWIG_fail;
+  }
+}
+
 %include "std_vector.i"
 %include "std_pair.i"
 #ifdef SWIGPYTHON
diff --git a/paddle/api/PaddleAPI.h b/paddle/api/PaddleAPI.h
index 5df7320136..807519e739 100644
--- a/paddle/api/PaddleAPI.h
+++ b/paddle/api/PaddleAPI.h
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <stddef.h>
 #include <stdint.h>
 #include <string>
+#include <stdexcept>
 #include <vector>
 #include "paddle/utils/GlobalConstants.h"
 #include "paddle/utils/TypeDefs.h"
@@ -45,6 +46,9 @@ void initPaddle(int argc, char** argv);
 
 /// Return FLAGS_use_gpu
 bool isUsingGpu();
 
+/// Set the Flags_use_gpu to the given parameter
+void setUseGpu(bool useGpu);
+
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();
@@ -55,7 +59,11 @@ class IOError {};
 class RangeError {};
 
 /// Not support Error, such as access GPU memory directly, etc.
-class UnsupportError {};
+class UnsupportError : public std::runtime_error {
+public:
+  UnsupportError() : std::runtime_error(" ") {};
+  UnsupportError(const std::string& message) : std::runtime_error(message) {};
+};
 
 /// This type will map to python's list of float.
 struct FloatArray {
@@ -131,8 +139,7 @@ public:
 
   static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
                                       bool copy = true,
-                                      bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                      bool useGpu = isUsingGpu());
 
   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -241,8 +248,7 @@ public:
                          bool useGpu = isUsingGpu());
 
   static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
-                                       bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                       bool useGpu = isUsingGpu());
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *
@@ -305,8 +311,7 @@ public:
                          bool useGpu = isUsingGpu());
 
   static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
-                                        bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                        bool useGpu = isUsingGpu());
 
   /**
    * Create Cpu IVector from numpy array, which dtype=int32
diff --git a/paddle/api/Util.cpp b/paddle/api/Util.cpp
index f72c06aad3..a8932351a6 100644
--- a/paddle/api/Util.cpp
+++ b/paddle/api/Util.cpp
@@ -43,6 +43,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
 
 bool isUsingGpu() {return FLAGS_use_gpu;}
 
+void setUseGpu(bool useGpu) {FLAGS_use_gpu = useGpu;}
+
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
   return false;
diff --git a/paddle/api/Vector.cpp b/paddle/api/Vector.cpp
index b8b1b2d2f1..547be27ed5 100644
--- a/paddle/api/Vector.cpp
+++ b/paddle/api/Vector.cpp
@@ -40,13 +40,11 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
 }
 
 IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
-                                        bool useGpu)
-  throw (UnsupportError) {
+                                        bool useGpu) {
   if (useGpu) {
     /// if use gpu only copy=true is supported
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return IVector::createGpuVectorFromNumpy(data, dim);
   } else {
@@ -204,13 +202,11 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
 }
 
 Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
-                                      bool useGpu)
-  throw (UnsupportError) {
+                                      bool useGpu) {
   if (useGpu) {
     /// if use gpu only copy=True is supported
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return Vector::createGpuVectorFromNumpy(data, dim);
   } else {
diff --git a/paddle/api/test/testMatrix.py b/paddle/api/test/testMatrix.py
index 6d0d42f340..87cedd607c 100644
--- a/paddle/api/test/testMatrix.py
+++ b/paddle/api/test/testMatrix.py
@@ -111,5 +111,9 @@ class TestMatrix(unittest.TestCase):
 
 
 if __name__ == "__main__":
-    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")
-    unittest.main()
+    swig_paddle.initPaddle("--use_gpu=0")
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrix)
+    unittest.TextTestRunner().run(suite)
+    if swig_paddle.isGpuVersion():
+        swig_paddle.setUseGpu(True)
+        unittest.main()
diff --git a/paddle/api/test/testVector.py b/paddle/api/test/testVector.py
index 5ca4d90dee..48aaa1d73d 100644
--- a/paddle/api/test/testVector.py
+++ b/paddle/api/test/testVector.py
@@ -147,7 +147,9 @@ class TestVector(unittest.TestCase):
 
 
 if __name__ == '__main__':
-    swig_paddle.initPaddle("--use_gpu=1"
-                           if swig_paddle.isGpuVersion() else "--use_gpu=0")
-    unittest.main()
-
+    swig_paddle.initPaddle("--use_gpu=0")
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestVector)
+    unittest.TextTestRunner().run(suite)
+    if swig_paddle.isGpuVersion():
+        swig_paddle.setUseGpu(True)
+        unittest.main()
\ No newline at end of file
-- 
GitLab
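
For reference, a minimal sketch of how the patched behavior is meant to look from the Python side, modeled on the updated tests. It assumes a py_paddle build that includes this patch and that the swig_paddle numpy typemaps accept a 2-D float32 array directly for the (data, dim1, dim2) parameters; the variable names are illustrative only. The new setUseGpu() flips FLAGS_use_gpu at runtime instead of requiring --use_gpu at initPaddle time, and an UnsupportError thrown in C++ now surfaces in Python as a RuntimeError carrying the constructor message, via the %exception block.

import numpy as np
from py_paddle import swig_paddle

# Start in CPU mode; the new setter can switch FLAGS_use_gpu later at runtime.
swig_paddle.initPaddle("--use_gpu=0")
data = np.zeros((2, 3), dtype="float32")

if swig_paddle.isGpuVersion():
    swig_paddle.setUseGpu(True)
    # With useGpu now defaulting to True, copy=False is rejected: the C++ side
    # throws UnsupportError("Gpu mode only supports copy=True"), and the SWIG
    # %exception handler re-raises it as a Python RuntimeError.
    try:
        swig_paddle.Matrix.createDenseFromNumpy(data, False)
    except RuntimeError as e:
        print(e)  # Gpu mode only supports copy=True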