Commit 0ba302f7 authored by liaogang

fix bug in paddle api when WITH_DOUBLE is enabled

Parent d8366a67
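Context for the change: paddle/utils/TypeDefs.h (newly included by the API header below) defines Paddle's floating-point type `real`, which is `float` in the default build and `double` when Paddle is compiled with WITH_DOUBLE. The API layer hard-coded `float` in its signatures and buffers, so a double-precision build handed `float*` data to core code expecting `paddle::real`. This commit replaces `float` with `real` throughout the C++ API. A minimal sketch of the typedef switch behind this (the macro name is an assumption for illustration, not taken from this commit):

// Sketch only: how the `real` typedef is typically selected at build time.
// PADDLE_TYPE_DOUBLE is assumed here to be defined by CMake when WITH_DOUBLE=ON.
#ifdef PADDLE_TYPE_DOUBLE
typedef double real;   // double-precision build: sizeof(real) == 8
#else
typedef float real;    // default single-precision build: sizeof(real) == 4
#endif

With the API written in terms of `real`, calls such as m->mat->copyFrom(data, dim1 * dim2) receive a pointer of the same element width as the core library expects in either build.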
@@ -44,7 +44,7 @@ Matrix* Matrix::createZero(size_t height, size_t width, bool useGpu) {
   return m;
 }
 
-Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
+Matrix* Matrix::createDense(const std::vector<real>& data, size_t height,
                             size_t width, bool useGpu) {
   auto m = new Matrix();
   m->m->mat = paddle::Matrix::create(height, width, useGpu);
@@ -52,7 +52,7 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
   return m;
 }
 
-Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
+Matrix* Matrix::createCpuDenseFromNumpy(real* data, int dim1, int dim2,
                                         bool copy) {
   auto m = new Matrix();
   if (copy) {
@@ -64,7 +64,7 @@ Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
   return m;
 }
 
-Matrix* Matrix::createGpuDenseFromNumpy(float* data, int dim1, int dim2) {
+Matrix* Matrix::createGpuDenseFromNumpy(real* data, int dim1, int dim2) {
   auto m = new Matrix();
   m->m->mat = paddle::Matrix::create(dim1, dim2, false, true);
   m->m->mat->copyFrom(data, dim1 * dim2);
@@ -86,7 +86,7 @@ size_t Matrix::getHeight() const { return m->mat->getHeight(); }
 
 size_t Matrix::getWidth() const { return m->mat->getWidth(); }
 
-float Matrix::get(size_t x, size_t y) const throw(RangeError) {
+real Matrix::get(size_t x, size_t y) const throw(RangeError) {
   if (x > this->getWidth() || y > this->getHeight()) {
     RangeError e;
     throw e;
@@ -94,7 +94,7 @@ float Matrix::get(size_t x, size_t y) const throw(RangeError) {
   return m->mat->getElement(x, y);
 }
 
-void Matrix::set(size_t x, size_t y, float val) throw(RangeError,
+void Matrix::set(size_t x, size_t y, real val) throw(RangeError,
                                                        UnsupportError) {
   if (x > this->getWidth() || y > this->getHeight()) {
     RangeError e;
@@ -193,10 +193,10 @@ FloatArray Matrix::getData() const {
   auto rawMat = m->mat.get();
   if (dynamic_cast<paddle::GpuMemoryHandle*>(rawMat->getMemoryHandle().get())) {
     // is gpu. then copy data
-    float* data = rawMat->getData();
+    real* data = rawMat->getData();
     size_t len = rawMat->getElementCnt();
-    float* cpuData = new float[len];
-    hl_memcpy_device2host(cpuData, data, len * sizeof(float));
+    real* cpuData = new real[len];
+    hl_memcpy_device2host(cpuData, data, len * sizeof(real));
     FloatArray ret_val(cpuData, len);
     ret_val.needFree = true;
     return ret_val;
@@ -208,7 +208,7 @@ FloatArray Matrix::getData() const {
 
 void Matrix::sparseCopyFrom(
     const std::vector<int>& rows, const std::vector<int>& cols,
-    const std::vector<float>& vals) throw(UnsupportError) {
+    const std::vector<real>& vals) throw(UnsupportError) {
   auto cpuSparseMat =
       std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
   if (cpuSparseMat != nullptr) {
@@ -217,7 +217,7 @@ void Matrix::sparseCopyFrom(
     // <<" ValSize = "<<vals.size();
     cpuSparseMat->copyFrom(const_cast<std::vector<int>&>(rows),
                            const_cast<std::vector<int>&>(cols),
-                           const_cast<std::vector<float>&>(vals));
+                           const_cast<std::vector<real>&>(vals));
   } else {
     UnsupportError e;
     throw e;
@@ -226,7 +226,7 @@ void Matrix::sparseCopyFrom(
 
 void* Matrix::getSharedPtr() const { return &m->mat; }
 
-void Matrix::toNumpyMatInplace(float** view_data, int* dim1,
+void Matrix::toNumpyMatInplace(real** view_data, int* dim1,
                                int* dim2) throw(UnsupportError) {
   auto cpuMat = std::dynamic_pointer_cast<paddle::CpuMatrix>(m->mat);
   if (cpuMat) {
@@ -237,9 +237,9 @@ void Matrix::toNumpyMatInplace(float** view_data, int* dim1,
     throw UnsupportError();
   }
 }
-void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
+void Matrix::copyToNumpyMat(real** view_m_data, int* dim1,
                             int* dim2) throw(UnsupportError) {
-  static_assert(sizeof(paddle::real) == sizeof(float),
+  static_assert(sizeof(paddle::real) == sizeof(real),
                 "Currently PaddleAPI only support for single "
                 "precision version of paddle.");
   if (this->isSparse()) {
@@ -247,7 +247,7 @@ void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
   } else {
     *dim1 = m->mat->getHeight();
     *dim2 = m->mat->getWidth();
-    *view_m_data = new float[(*dim1) * (*dim2)];
+    *view_m_data = new real[(*dim1) * (*dim2)];
    if (auto cpuMat = dynamic_cast<paddle::CpuMatrix*>(m->mat.get())) {
      auto src = cpuMat->getData();
      auto dest = *view_m_data;
@@ -264,7 +264,7 @@ void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
   }
 }
 
-void Matrix::copyFromNumpyMat(float* data, int dim1,
+void Matrix::copyFromNumpyMat(real* data, int dim1,
                               int dim2) throw(UnsupportError, RangeError) {
   if (isSparse()) {
     throw UnsupportError();
......
@@ -20,6 +20,7 @@ limitations under the License. */
 #include <string>
 #include <vector>
 #include "paddle/utils/GlobalConstants.h"
+#include "paddle/utils/TypeDefs.h"
 
 /// Import PaddlePaddle's enumeration into global namespace.
 using namespace paddle::enumeration_wrapper;  // NOLINT
@@ -55,10 +56,10 @@ class UnsupportError {};
 
 /// This type will map to python's list of float.
 struct FloatArray {
-  const float* buf;
+  const real* buf;
   const size_t length;
   bool needFree;  // true if the buf is dynamic alloced.
-  FloatArray(const float* b, const size_t l);
+  FloatArray(const real* b, const size_t l);
 };
 
 /// This type will map to python's list of int
@@ -71,11 +72,11 @@ struct IntArray {
 
 /// This type will map to python's list of (int, float)
 struct IntWithFloatArray {
-  const float* valBuf;
+  const real* valBuf;
   const int* idxBuf;
   const size_t length;
   bool needFree;
-  IntWithFloatArray(const float* v, const int* i, size_t l, bool f = false);
+  IntWithFloatArray(const real* v, const int* i, size_t l, bool f = false);
 };
 
 enum SparseValueType { SPARSE_NON_VALUE = 0, SPARSE_VALUE = 1 };
@@ -121,7 +122,7 @@ public:
   * @param data list of float should be passed in python.
   * @note the value will be copy into a new matrix.
   */
-  static Matrix* createDense(const std::vector<float>& data, size_t height,
+  static Matrix* createDense(const std::vector<real>& data, size_t height,
                              size_t width, bool useGpu = false);
 
  /**
@@ -133,11 +134,11 @@ public:
   * @param copy true if copy into a new matrix, false will create
   *             matrix inplace.
   */
-  static Matrix* createCpuDenseFromNumpy(float* data, int dim1, int dim2,
+  static Matrix* createCpuDenseFromNumpy(real* data, int dim1, int dim2,
                                          bool copy = false);
 
  /// Create Gpu Dense Matrix from numpy matrix, dtype=float32
-  static Matrix* createGpuDenseFromNumpy(float* data, int dim1, int dim2);
+  static Matrix* createGpuDenseFromNumpy(real* data, int dim1, int dim2);
 
  /**
   * Cast to numpy matrix.
@@ -153,15 +154,15 @@ public:
   *      numpy_mat = m.toNumpyMat()
   * @endcode
   */
-  void toNumpyMatInplace(float** view_data, int* dim1,
+  void toNumpyMatInplace(real** view_data, int* dim1,
                          int* dim2) throw(UnsupportError);
 
  /// Copy To numpy mat.
-  void copyToNumpyMat(float** view_m_data, int* dim1,
+  void copyToNumpyMat(real** view_m_data, int* dim1,
                       int* dim2) throw(UnsupportError);
 
  /// Copy From Numpy Mat
-  void copyFromNumpyMat(float* data, int dim1, int dim2) throw(UnsupportError,
+  void copyFromNumpyMat(real* data, int dim1, int dim2) throw(UnsupportError,
                                                                RangeError);
 
  /// return true if this matrix is sparse.
@@ -180,9 +181,9 @@ public:
   size_t getWidth() const;
 
-  float get(size_t x, size_t y) const throw(RangeError);
+  real get(size_t x, size_t y) const throw(RangeError);
 
-  void set(size_t x, size_t y, float val) throw(RangeError, UnsupportError);
+  void set(size_t x, size_t y, real val) throw(RangeError, UnsupportError);
 
  /// return type is list of float
  FloatArray getData() const;
 
@@ -194,8 +195,8 @@ public:
   */
  void sparseCopyFrom(const std::vector<int>& rows,
                      const std::vector<int>& cols,
-                      const std::vector<float>& values =
-                          std::vector<float>()) throw(UnsupportError);
+                      const std::vector<real>& values =
+                          std::vector<real>()) throw(UnsupportError);
 
  bool isGpu() const;
 
@@ -227,33 +228,33 @@ public:
   *
   * It will create a new vector, and copy data into it.
   */
-  static Vector* create(const std::vector<float>& data, bool useGpu = false);
+  static Vector* create(const std::vector<real>& data, bool useGpu = false);
 
  /**
   * Create Cpu Vector from numpy array, which dtype=float32
   *
   * If copy is false, it will create vector inplace.
   */
-  static Vector* createCpuVectorFromNumpy(float* data, int dim,
+  static Vector* createCpuVectorFromNumpy(real* data, int dim,
                                           bool copy = false);
 
  /// Create Gpu Vector from numpy array, which dtype=float32
-  static Vector* createGpuVectorFromNumpy(float* data, int dim);
+  static Vector* createGpuVectorFromNumpy(real* data, int dim);
 
  /// Cast to numpy array inplace.
-  void toNumpyArrayInplace(float** view_data, int* dim1) throw(UnsupportError);
+  void toNumpyArrayInplace(real** view_data, int* dim1) throw(UnsupportError);
 
  /// Copy to numpy array.
-  void copyToNumpyArray(float** view_m_data, int* dim1);
+  void copyToNumpyArray(real** view_m_data, int* dim1);
 
  /// Copy from numpy array.
-  void copyFromNumpyArray(float* data, int dim);
+  void copyFromNumpyArray(real* data, int dim);
 
  /// __getitem__ in python
-  float get(const size_t idx) const throw(RangeError, UnsupportError);
+  real get(const size_t idx) const throw(RangeError, UnsupportError);
 
  /// __setitem__ in python
-  void set(const size_t idx, float val) throw(RangeError, UnsupportError);
+  void set(const size_t idx, real val) throw(RangeError, UnsupportError);
 
  /// Return is GPU vector or not.
  bool isGpu() const;
......
@@ -31,13 +31,13 @@ void initPaddle(int argc, char** argv) {
   feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
 }
 
-FloatArray::FloatArray(const float* b, const size_t l)
+FloatArray::FloatArray(const real* b, const size_t l)
     : buf(b), length(l), needFree(false) {}
 
 IntArray::IntArray(const int* b, const size_t l, bool f)
     : buf(b), length(l), needFree(f) {}
 
-IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
+IntWithFloatArray::IntWithFloatArray(const real* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}
......
@@ -140,7 +140,7 @@ struct VectorPrivate {
   paddle::VectorPtr vec;
 
   void safeAccessData(const size_t idx,
-                      const std::function<void(float&)>& func) const
+                      const std::function<void(real&)>& func) const
       throw(RangeError, UnsupportError) {
     auto cpuVec = std::dynamic_pointer_cast<const paddle::CpuVector>(vec);
     if (cpuVec != nullptr) {
@@ -170,7 +170,7 @@ Vector* Vector::createZero(size_t sz, bool useGpu) {
   return retVec;
 }
 
-Vector* Vector::create(const std::vector<float>& data, bool useGpu) {
+Vector* Vector::create(const std::vector<real>& data, bool useGpu) {
   auto retVec = new Vector();
   retVec->m->vec = paddle::Vector::create(data.size(), useGpu);
   retVec->m->vec->copyFrom(data.data(), data.size());
@@ -188,7 +188,7 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
   }
 }
 
-Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
+Vector* Vector::createCpuVectorFromNumpy(real* data, int dim, bool copy) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
   if (copy) {
@@ -200,7 +200,7 @@ Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
   return retVec;
 }
 
-Vector* Vector::createGpuVectorFromNumpy(float* data, int dim) {
+Vector* Vector::createGpuVectorFromNumpy(real* data, int dim) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
   retVec->m->vec = paddle::Vector::create((size_t)dim, true);
@@ -208,7 +208,7 @@ Vector* Vector::createGpuVectorFromNumpy(float* data, int dim) {
   return retVec;
 }
 
-void Vector::toNumpyArrayInplace(float** view_data,
+void Vector::toNumpyArrayInplace(real** view_data,
                                  int* dim1) throw(UnsupportError) {
   auto v = std::dynamic_pointer_cast<paddle::CpuVector>(m->vec);
   if (v != nullptr) {
@@ -219,20 +219,20 @@ void Vector::toNumpyArrayInplace(float** view_data,
   }
 }
 
-void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
+void Vector::copyToNumpyArray(real** view_m_data, int* dim1) {
   *dim1 = m->vec->getSize();
-  *view_m_data = new float[*dim1];
+  *view_m_data = new real[*dim1];
   if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
-    std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
+    std::memcpy(*view_m_data, cpuVec->getData(), sizeof(real) * (*dim1));
   } else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
     hl_memcpy_device2host(*view_m_data, gpuVec->getData(),
-                          sizeof(float) * (*dim1));
+                          sizeof(real) * (*dim1));
   } else {
     LOG(INFO) << "Unexpected situation";
   }
 }
 
-void Vector::copyFromNumpyArray(float* data, int dim) {
+void Vector::copyFromNumpyArray(real* data, int dim) {
   m->vec->resize(dim);
   m->vec->copyFrom(data, dim);
 }
@@ -241,15 +241,15 @@ bool Vector::isGpu() const {
   return std::dynamic_pointer_cast<paddle::GpuVector>(m->vec) != nullptr;
 }
 
-float Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
-  float r;
-  m->safeAccessData(idx, [&](float& o) { r = o; });
+real Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
+  real r;
+  m->safeAccessData(idx, [&](real& o) { r = o; });
   return r;
 }
 
-void Vector::set(const size_t idx, float val) throw(RangeError,
+void Vector::set(const size_t idx, real val) throw(RangeError,
                                                      UnsupportError) {
-  m->safeAccessData(idx, [&](float& o) { o = val; });
+  m->safeAccessData(idx, [&](real& o) { o = val; });
 }
 
 size_t Vector::getSize() const { return m->vec->getSize(); }
......
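For reference, a small C++ usage sketch of the updated API (hypothetical driver code, not part of the commit). Because the signatures above now use `real`, the same code compiles and behaves consistently whether Paddle was built with or without WITH_DOUBLE:

#include <vector>

// Assumes the Matrix declarations from the API header modified above are visible.
void denseRoundTrip() {
  // Build a 2x3 dense CPU matrix from a vector<real>.
  std::vector<real> data = {1, 2, 3, 4, 5, 6};
  Matrix* mat = Matrix::createDense(data, /*height=*/2, /*width=*/3);

  real v = mat->get(1, 1);   // element access now returns real
  mat->set(0, 0, v);         // element assignment now takes real

  // Copy the contents into a freshly allocated real[] buffer, as the
  // numpy bindings do; in this sketch the caller releases it manually,
  // whereas the actual Python wrapper layer manages ownership.
  real* copy = nullptr;
  int dim1 = 0, dim2 = 0;
  mat->copyToNumpyMat(&copy, &dim1, &dim2);
  delete[] copy;
  delete mat;
}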