Commit b2075351 authored by wangyang59

Add setUseGpu in PaddleAPI.h and handle UnsupportError in SWIG so that a meaningful error message is displayed

Parent 91e6dcb6
@@ -53,13 +53,11 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
 }
 
 Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
-                                     bool copy, bool useGpu)
-  throw (UnsupportError) {
+                                     bool copy, bool useGpu) {
   if (useGpu) {
     /// Gpu mode only supports copy=True
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
   } else {
......
@@ -4,6 +4,20 @@
 #define SWIG_FILE_WITH_INIT
 #include "api/PaddleAPI.h"
 %}
+
+%include "exception.i"
+%exception{
+  try{
+    $action
+  }
+  catch(UnsupportError &ex ){
+    SWIG_exception(SWIG_RuntimeError, ex.what());
+  }
+  catch( ... ){
+    SWIG_fail;
+  }
+}
+
 %include "std_vector.i"
 %include "std_pair.i"
 #ifdef SWIGPYTHON
......
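
With this handler in place, an UnsupportError raised on the C++ side now surfaces in Python as a RuntimeError carrying the original message, instead of an opaque failure. A minimal sketch of the expected behaviour, assuming the bindings are imported as py_paddle.swig_paddle and that the numpy typemaps collapse (data, dim1, dim2) into a single 2-D float32 array argument:

    import numpy as np
    from py_paddle import swig_paddle

    swig_paddle.initPaddle("--use_gpu=0")

    mat = np.random.rand(3, 4).astype("float32")
    try:
        # copy=False together with useGpu=True hits the unsupported path, so the
        # C++ side throws UnsupportError("Gpu mode only supports copy=True") and
        # the %exception block above re-raises it as a Python RuntimeError.
        swig_paddle.Matrix.createDenseFromNumpy(mat, False, True)
    except RuntimeError as err:
        print("unsupported:", err)
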
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <stddef.h>
 #include <stdint.h>
 #include <string>
+#include <stdexcept>
 #include <vector>
 #include "paddle/utils/GlobalConstants.h"
 #include "paddle/utils/TypeDefs.h"
@@ -45,6 +46,9 @@ void initPaddle(int argc, char** argv);
 /// Return FLAGS_use_gpu
 bool isUsingGpu();
 
+/// Set the Flags_use_gpu to the given parameter
+void setUseGpu(bool useGpu);
+
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();
@@ -55,7 +59,11 @@ class IOError {};
 class RangeError {};
 
 /// Not support Error, such as access GPU memory directly, etc.
-class UnsupportError {};
+class UnsupportError : public std::runtime_error {
+public:
+  UnsupportError() : std::runtime_error(" ") {};
+  UnsupportError(const std::string& message) : std::runtime_error(message) {};
+};
 
 /// This type will map to python's list of float.
 struct FloatArray {
@@ -131,8 +139,7 @@ public:
   static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
                                       bool copy = true,
-                                      bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                      bool useGpu = isUsingGpu());
 
   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -241,8 +248,7 @@ public:
                                  bool useGpu = isUsingGpu());
 
   static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
-                                       bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                       bool useGpu = isUsingGpu());
 
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *
@@ -305,8 +311,7 @@ public:
                                  bool useGpu = isUsingGpu());
 
   static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
-                                        bool useGpu = isUsingGpu())
-  throw (UnsupportError) ;
+                                        bool useGpu = isUsingGpu());
 
   /**
    * Create Cpu IVector from numpy array, which dtype=int32
......
@@ -43,6 +43,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
 bool isUsingGpu() {return FLAGS_use_gpu;}
 
+void setUseGpu(bool useGpu) {FLAGS_use_gpu = useGpu;}
+
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
   return false;
......
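
The new setter writes straight through to FLAGS_use_gpu, so GPU mode can now be toggled from Python after initPaddle() rather than only via the --use_gpu command-line flag. A short usage sketch mirroring the updated tests below, assuming the bindings are imported as py_paddle.swig_paddle:

    from py_paddle import swig_paddle

    # Initialize in CPU mode, then opt into GPU mode only when this build supports it.
    swig_paddle.initPaddle("--use_gpu=0")
    if swig_paddle.isGpuVersion():
        swig_paddle.setUseGpu(True)
    print("using gpu:", swig_paddle.isUsingGpu())
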
@@ -40,13 +40,11 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
 }
 
 IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
-                                        bool useGpu)
-  throw (UnsupportError) {
+                                        bool useGpu) {
   if (useGpu) {
     /// if use gpu only copy=true is supported
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return IVector::createGpuVectorFromNumpy(data, dim);
   } else {
@@ -204,13 +202,11 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
 }
 
 Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
-                                      bool useGpu)
-  throw (UnsupportError) {
+                                      bool useGpu) {
   if (useGpu) {
     /// if use gpu only copy=True is supported
     if (!copy) {
-      UnsupportError e;
-      throw e;
+      throw UnsupportError("Gpu mode only supports copy=True");
     }
     return Vector::createGpuVectorFromNumpy(data, dim);
   } else {
......
@@ -111,5 +111,9 @@ class TestMatrix(unittest.TestCase):
 
 if __name__ == "__main__":
-    swig_paddle.initPaddle("--use_gpu=1" if swig_paddle.isGpuVersion() else "--use_gpu=0")
+    swig_paddle.initPaddle("--use_gpu=0")
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestMatrix)
+    unittest.TextTestRunner().run(suite)
+    if swig_paddle.isGpuVersion():
+        swig_paddle.setUseGpu(True)
     unittest.main()
@@ -147,7 +147,9 @@ class TestVector(unittest.TestCase):
 
 if __name__ == '__main__':
-    swig_paddle.initPaddle("--use_gpu=1"
-                           if swig_paddle.isGpuVersion() else "--use_gpu=0")
+    swig_paddle.initPaddle("--use_gpu=0")
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestVector)
+    unittest.TextTestRunner().run(suite)
+    if swig_paddle.isGpuVersion():
+        swig_paddle.setUseGpu(True)
     unittest.main()
\ No newline at end of file