Commit 2be3a747 authored by wangyang59

Modified the API to use FLAGS_use_gpu as the default value of useGpu

Parent 0ba0f02c
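In effect, the factory methods touched below now pick up the runtime value of FLAGS_use_gpu instead of hard-coding useGpu = false. A minimal caller-side sketch of that behavior follows; the header name and the assumption that initPaddle() parses a --use_gpu command-line flag are illustrative, not part of this commit.

    // Minimal usage sketch (header name and flag handling are assumptions).
    #include "PaddleAPI.h"  // assumed header for the API declared in this diff

    int main(int argc, char** argv) {
      // e.g. run with:  ./demo --use_gpu=true
      initPaddle(argc, argv);

      // No explicit useGpu argument: the default is now isUseGpu(), i.e. the
      // current value of FLAGS_use_gpu, instead of a hard-coded false.
      Matrix* m = Matrix::createZero(16, 32);
      Vector* v = Vector::createZero(64);
      return 0;
    }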
@@ -52,6 +52,17 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
   return m;
 }
 
+Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
+                                     bool copy, bool useGpu) {
+  if (useGpu) {
+    /// Gpu mode only supports copy=True
+    CHECK(copy);
+    return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
+  } else {
+    return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
+  }
+}
+
 Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
                                         bool copy) {
   auto m = new Matrix();
...
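A caller-side sketch of the dispatch wrapper added above (the buffer and shape are illustrative):

    // Hypothetical 2x3 numpy-backed buffer.
    float buf[6] = {0, 1, 2, 3, 4, 5};

    // Dispatches to createGpuDenseFromNumpy() when useGpu (default: FLAGS_use_gpu)
    // is true, otherwise to createCpuDenseFromNumpy(); GPU mode requires copy=true.
    Matrix* m = Matrix::createDenseFromNumpy(buf, 2, 3);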
@@ -133,14 +133,21 @@ namespace std {
 %newobject Matrix::createZero;
 %newobject Matrix::createSparse;
 %newobject Matrix::createDense;
+%newobject Matrix::createDenseFromNumpy;
+%newobject Matrix::createCpuDenseFromNumpy;
+%newobject Matrix::createGpuDenseFromNumpy;
 %newobject Vector::createZero;
 %newobject Vector::create;
+%newobject Vector::createVectorFromNumpy;
 %newobject Vector::createCpuVectorFromNumpy;
 %newobject Vector::createGpuVectorFromNumpy;
 %newobject IVector::createZero;
 %newobject IVector::create;
+%newobject IVector::createVectorFromNumpy;
+%newobject IVector::createCpuVectorFromNumpy;
+%newobject IVector::createGpuVectorFromNumpy;
 %newobject Trainer::createByCommandLine;
-%newobject Trainer::getNetworkOutput;
+%newobject Trainer::getForwardOutput;
 %newobject Trainer::getLayerOutput;
 %newobject Arguments::getSlotValue;
 %newobject Arguments::getSlotIds;
...
@@ -42,6 +42,9 @@ using namespace paddle::enumeration_wrapper; // NOLINT
  */
 void initPaddle(int argc, char** argv);
 
+/// Return FLAGS_use_gpu
+bool isUseGpu();
+
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();
@@ -101,7 +104,8 @@ public:
   /**
    * Create A Matrix with height,width, which is filled by zero.
    */
-  static Matrix* createZero(size_t height, size_t width, bool useGpu = false);
+  static Matrix* createZero(size_t height, size_t width,
+                            bool useGpu = isUseGpu());
 
   /**
    * Create Sparse Matrix.
@@ -114,7 +118,7 @@ public:
    */
   static Matrix* createSparse(size_t height, size_t width, size_t nnz,
                               bool isNonVal = true, bool trans = false,
-                              bool useGpu = false);
+                              bool useGpu = isUseGpu());
 
   /**
    * Create Dense Matrix.
@@ -123,7 +127,11 @@ public:
    * @note the value will be copy into a new matrix.
    */
   static Matrix* createDense(const std::vector<float>& data, size_t height,
-                             size_t width, bool useGpu = false);
+                             size_t width, bool useGpu = isUseGpu());
+
+  static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
+                                      bool copy = true,
+                                      bool useGpu = isUseGpu());
 
   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32
@@ -221,15 +229,18 @@ public:
   ~Vector();
 
   /// Create Vector filled with zero.
-  static Vector* createZero(size_t sz, bool useGpu = false);
+  static Vector* createZero(size_t sz, bool useGpu = isUseGpu());
 
   /**
    * Create Vector from list of float.
    *
    * It will create a new vector, and copy data into it.
    */
-  static Vector* create(const std::vector<float>& data, bool useGpu = false);
+  static Vector* create(const std::vector<float>& data,
+                        bool useGpu = isUseGpu());
+
+  static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
+                                       bool useGpu = isUseGpu());
 
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *
@@ -279,13 +290,17 @@ class IVector {
 public:
   /// Create IVector filled with zero
-  static IVector* createZero(size_t sz, bool useGpu = false);
+  static IVector* createZero(size_t sz, bool useGpu = isUseGpu());
 
   /**
    * Create IVector from list of int.
    * It will create a new vector, and copy data into it.
    */
-  static IVector* create(const std::vector<int>& data, bool useGpu = false);
+  static IVector* create(const std::vector<int>& data,
+                         bool useGpu = isUseGpu());
+
+  static IVector* createVectorFromNumpy(int* data, int dim, bool copy = true,
+                                        bool useGpu = isUseGpu());
 
   /**
    * Create Cpu IVector from numpy array, which dtype=int32
@@ -297,7 +312,7 @@ public:
   /**
    * Create Gpu IVector from numpy array, which dtype=int32
    */
-  static IVector* createGpuVectorFromNumy(int* data, int dim);
+  static IVector* createGpuVectorFromNumpy(int* data, int dim);
 
   /// Cast to numpy array inplace.
   void toNumpyArrayInplace(int** view_data, int* dim1) throw(UnsupportError);
...
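Note that the header uses a function call, isUseGpu(), as the default argument rather than FLAGS_use_gpu itself. A standalone sketch of why this works (the names below are illustrative, not Paddle code): C++ re-evaluates a default argument at every call site, so the default follows the flag's current runtime value, and the public header only needs the isUseGpu() declaration, not the gflags symbol.

    #include <iostream>

    static bool g_flag = false;            // stands in for FLAGS_use_gpu
    bool currentFlag() { return g_flag; }  // stands in for isUseGpu()

    // Default argument is evaluated at each call, not at declaration time.
    void createSomething(bool useGpu = currentFlag()) {
      std::cout << (useGpu ? "GPU" : "CPU") << "\n";
    }

    int main() {
      createSomething();   // prints "CPU"
      g_flag = true;
      createSomething();   // prints "GPU" -- the default is re-evaluated
      return 0;
    }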
@@ -41,6 +41,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}
 
+bool isUseGpu() {return FLAGS_use_gpu;}
+
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
   return false;
...
@@ -39,6 +39,17 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
   return v;
 }
 
+IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
+                                        bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=true is supported
+    CHECK(copy);
+    return IVector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return IVector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   auto v = new IVector();
   if (copy) {
@@ -50,7 +61,7 @@ IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   return v;
 }
 
-IVector* IVector::createGpuVectorFromNumy(int* data, int dim) {
+IVector* IVector::createGpuVectorFromNumpy(int* data, int dim) {
   auto v = new IVector();
   v->m->vec = paddle::IVector::create(dim, true);
   v->m->vec->copyFrom(data, dim);
@@ -188,6 +199,17 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
   }
 }
 
+Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
+                                      bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=True is supported
+    CHECK(copy);
+    return Vector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return Vector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
...
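A caller-side sketch for the new Vector wrapper above (the buffer is illustrative); the same pattern applies to IVector::createVectorFromNumpy.

    // Hypothetical numpy-backed buffer of length 4.
    float data[4] = {0.5f, 1.5f, 2.5f, 3.5f};

    // copy defaults to true and useGpu to FLAGS_use_gpu; the GPU path must
    // copy into device memory, which is why it CHECKs copy, while copy=false
    // is only meaningful for the CPU path.
    Vector* v = Vector::createVectorFromNumpy(data, 4);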