Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle, in sync with the fork source)
Commit aaed5cfc: revert real into float for swig API
Authored on Sep 20, 2016 by liaogang
Parent commit: 7ff8e762
Showing 4 changed files with 56 additions and 56 deletions (+56, -56):

paddle/api/Matrix.cpp     +17  -17
paddle/api/PaddleAPI.h    +22  -22
paddle/api/Util.cpp        +2   -2
paddle/api/Vector.cpp     +15  -15
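The change is mechanical: every "real" in the SWIG-exported signatures goes back to plain float, so the Python bindings keep a fixed float32 interface while the core library keeps its configurable paddle::real type. A minimal sketch of that distinction follows; the typedef and the PADDLE_TYPE_DOUBLE switch are assumptions about the core headers, not part of this commit:

// Sketch, not from this commit: paddle::real is a build-time typedef in the
// core library (assumed here to be controlled by a PADDLE_TYPE_DOUBLE-style
// switch), while the SWIG-facing API in paddle/api is pinned to float.
namespace paddle {
#ifdef PADDLE_TYPE_DOUBLE
typedef double real;  // double precision build
#else
typedef float real;   // single precision build
#endif
}  // namespace paddle

// The exported API always trades in float, e.g.:
//   float Matrix::get(size_t x, size_t y) const;
//   void  Matrix::set(size_t x, size_t y, float val);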
paddle/api/Matrix.cpp

@@ -44,7 +44,7 @@ Matrix* Matrix::createZero(size_t height, size_t width, bool useGpu) {
   return m;
 }
 
-Matrix* Matrix::createDense(const std::vector<real>& data, size_t height,
+Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
                             size_t width, bool useGpu) {
   auto m = new Matrix();
   m->m->mat = paddle::Matrix::create(height, width, useGpu);

@@ -52,7 +52,7 @@ Matrix* Matrix::createDense(const std::vector<real>& data, size_t height,
   return m;
 }
 
-Matrix* Matrix::createCpuDenseFromNumpy(real* data, int dim1, int dim2,
+Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
                                         bool copy) {
   auto m = new Matrix();
   if (copy) {

@@ -64,7 +64,7 @@ Matrix* Matrix::createCpuDenseFromNumpy(real* data, int dim1, int dim2,
   return m;
 }
 
-Matrix* Matrix::createGpuDenseFromNumpy(real* data, int dim1, int dim2) {
+Matrix* Matrix::createGpuDenseFromNumpy(float* data, int dim1, int dim2) {
   auto m = new Matrix();
   m->m->mat = paddle::Matrix::create(dim1, dim2, false, true);
   m->m->mat->copyFrom(data, dim1 * dim2);

@@ -86,7 +86,7 @@ size_t Matrix::getHeight() const { return m->mat->getHeight(); }
 
 size_t Matrix::getWidth() const { return m->mat->getWidth(); }
 
-real Matrix::get(size_t x, size_t y) const throw(RangeError) {
+float Matrix::get(size_t x, size_t y) const throw(RangeError) {
   if (x > this->getWidth() || y > this->getHeight()) {
     RangeError e;
     throw e;

@@ -94,7 +94,7 @@ real Matrix::get(size_t x, size_t y) const throw(RangeError) {
   return m->mat->getElement(x, y);
 }
 
-void Matrix::set(size_t x, size_t y, real val) throw(RangeError,
+void Matrix::set(size_t x, size_t y, float val) throw(RangeError,
                                                       UnsupportError) {
   if (x > this->getWidth() || y > this->getHeight()) {
     RangeError e;

@@ -193,10 +193,10 @@ FloatArray Matrix::getData() const {
   auto rawMat = m->mat.get();
   if (dynamic_cast<paddle::GpuMemoryHandle*>(rawMat->getMemoryHandle().get())) {
     // is gpu. then copy data
-    real* data = rawMat->getData();
+    float* data = rawMat->getData();
     size_t len = rawMat->getElementCnt();
-    real* cpuData = new real[len];
-    hl_memcpy_device2host(cpuData, data, len * sizeof(real));
+    float* cpuData = new float[len];
+    hl_memcpy_device2host(cpuData, data, len * sizeof(float));
     FloatArray ret_val(cpuData, len);
     ret_val.needFree = true;
     return ret_val;

@@ -208,7 +208,7 @@ FloatArray Matrix::getData() const {
 
 void Matrix::sparseCopyFrom(
     const std::vector<int>& rows, const std::vector<int>& cols,
-    const std::vector<real>& vals) throw(UnsupportError) {
+    const std::vector<float>& vals) throw(UnsupportError) {
   auto cpuSparseMat =
       std::dynamic_pointer_cast<paddle::CpuSparseMatrix>(m->mat);
   if (cpuSparseMat != nullptr) {

@@ -217,7 +217,7 @@ void Matrix::sparseCopyFrom(
     // <<" ValSize = "<<vals.size();
     cpuSparseMat->copyFrom(const_cast<std::vector<int>&>(rows),
                            const_cast<std::vector<int>&>(cols),
-                           const_cast<std::vector<real>&>(vals));
+                           const_cast<std::vector<float>&>(vals));
   } else {
     UnsupportError e;
     throw e;

@@ -226,7 +226,7 @@ void Matrix::sparseCopyFrom(
 
 void* Matrix::getSharedPtr() const { return &m->mat; }
 
-void Matrix::toNumpyMatInplace(real** view_data, int* dim1,
+void Matrix::toNumpyMatInplace(float** view_data, int* dim1,
                                int* dim2) throw(UnsupportError) {
   auto cpuMat = std::dynamic_pointer_cast<paddle::CpuMatrix>(m->mat);
   if (cpuMat) {
@@ -237,9 +237,9 @@ void Matrix::toNumpyMatInplace(real** view_data, int* dim1,
     throw UnsupportError();
   }
 }
-void Matrix::copyToNumpyMat(real** view_m_data, int* dim1,
+void Matrix::copyToNumpyMat(float** view_m_data, int* dim1,
                             int* dim2) throw(UnsupportError) {
-  static_assert(sizeof(paddle::real) == sizeof(real),
+  static_assert(sizeof(float) == sizeof(float),
                 "Currently PaddleAPI only support for single "
                 "precision version of paddle.");
   if (this->isSparse()) {
@@ -247,16 +247,16 @@ void Matrix::copyToNumpyMat(real** view_m_data, int* dim1,
   } else {
     *dim1 = m->mat->getHeight();
     *dim2 = m->mat->getWidth();
-    *view_m_data = new real[(*dim1) * (*dim2)];
+    *view_m_data = new float[(*dim1) * (*dim2)];
     if (auto cpuMat = dynamic_cast<paddle::CpuMatrix*>(m->mat.get())) {
       auto src = cpuMat->getData();
       auto dest = *view_m_data;
-      std::memcpy(dest, src, sizeof(paddle::real) * (*dim1) * (*dim2));
+      std::memcpy(dest, src, sizeof(float) * (*dim1) * (*dim2));
     } else if (auto gpuMat = dynamic_cast<paddle::GpuMatrix*>(m->mat.get())) {
       auto src = gpuMat->getData();
       auto dest = *view_m_data;
       hl_memcpy_device2host(dest, src,
-                            sizeof(paddle::real) * (*dim1) * (*dim2));
+                            sizeof(float) * (*dim1) * (*dim2));
     } else {
       LOG(WARNING) << "Unexpected Situation";
       throw UnsupportError();

@@ -264,7 +264,7 @@ void Matrix::copyToNumpyMat(real** view_m_data, int* dim1,
   }
 }
 
-void Matrix::copyFromNumpyMat(real* data, int dim1,
+void Matrix::copyFromNumpyMat(float* data, int dim1,
                               int dim2) throw(UnsupportError, RangeError) {
   if (isSparse()) {
     throw UnsupportError();
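With the factory functions back on float, a caller on the C++ side of the binding uses them as below. This is a hypothetical usage sketch based only on the signatures in this diff; object cleanup and exception handling are omitted:

#include <vector>
// #include "PaddleAPI.h"   // declares the Matrix API changed in this commit

void denseMatrixExample() {
  std::vector<float> data(6, 1.0f);              // values for a 2 x 3 matrix
  Matrix* m = Matrix::createDense(data, 2, 3);   // copies data into a new CPU matrix
  m->set(0, 1, 3.5f);                            // takes float, not paddle::real
  float v = m->get(0, 1);                        // returns float
  (void)v;  // ownership of m is left out of this sketch
}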
paddle/api/PaddleAPI.h

@@ -56,10 +56,10 @@ class UnsupportError {};
 
 /// This type will map to python's list of float.
 struct FloatArray {
-  const real* buf;
+  const float* buf;
   const size_t length;
   bool needFree;  // true if the buf is dynamic alloced.
-  FloatArray(const real* b, const size_t l);
+  FloatArray(const float* b, const size_t l);
 };
 
 /// This type will map to python's list of int

@@ -72,11 +72,11 @@ struct IntArray {
 
 /// This type will map to python's list of (int, float)
 struct IntWithFloatArray {
-  const real* valBuf;
+  const float* valBuf;
   const int* idxBuf;
   const size_t length;
   bool needFree;
-  IntWithFloatArray(const real* v, const int* i, size_t l, bool f = false);
+  IntWithFloatArray(const float* v, const int* i, size_t l, bool f = false);
 };
 
 enum SparseValueType { SPARSE_NON_VALUE = 0, SPARSE_VALUE = 1 };

@@ -122,7 +122,7 @@ public:
    * @param data list of float should be passed in python.
    * @note the value will be copy into a new matrix.
    */
-  static Matrix* createDense(const std::vector<real>& data, size_t height,
+  static Matrix* createDense(const std::vector<float>& data, size_t height,
                              size_t width, bool useGpu = false);
 
   /**

@@ -134,11 +134,11 @@ public:
    * @param copy true if copy into a new matrix, false will create
    *             matrix inplace.
    */
-  static Matrix* createCpuDenseFromNumpy(real* data, int dim1, int dim2,
+  static Matrix* createCpuDenseFromNumpy(float* data, int dim1, int dim2,
                                          bool copy = false);
 
   /// Create Gpu Dense Matrix from numpy matrix, dtype=float32
-  static Matrix* createGpuDenseFromNumpy(real* data, int dim1, int dim2);
+  static Matrix* createGpuDenseFromNumpy(float* data, int dim1, int dim2);
 
   /**
    * Cast to numpy matrix.

@@ -154,15 +154,15 @@ public:
    *   numpy_mat = m.toNumpyMat()
    * @endcode
    */
-  void toNumpyMatInplace(real** view_data, int* dim1,
+  void toNumpyMatInplace(float** view_data, int* dim1,
                          int* dim2) throw(UnsupportError);
 
   /// Copy To numpy mat.
-  void copyToNumpyMat(real** view_m_data, int* dim1,
+  void copyToNumpyMat(float** view_m_data, int* dim1,
                       int* dim2) throw(UnsupportError);
 
   /// Copy From Numpy Mat
-  void copyFromNumpyMat(real* data, int dim1, int dim2) throw(UnsupportError,
+  void copyFromNumpyMat(float* data, int dim1, int dim2) throw(UnsupportError,
                                                                RangeError);
 
   /// return true if this matrix is sparse.

@@ -181,9 +181,9 @@ public:
 
   size_t getWidth() const;
 
-  real get(size_t x, size_t y) const throw(RangeError);
+  float get(size_t x, size_t y) const throw(RangeError);
 
-  void set(size_t x, size_t y, real val) throw(RangeError, UnsupportError);
+  void set(size_t x, size_t y, float val) throw(RangeError, UnsupportError);
 
   /// return type is list of float
   FloatArray getData() const;

@@ -195,8 +195,8 @@ public:
   */
   void sparseCopyFrom(const std::vector<int>& rows,
                       const std::vector<int>& cols,
-                      const std::vector<real>& values = std::vector<real>())
+                      const std::vector<float>& values = std::vector<float>())
       throw(UnsupportError);
 
   bool isGpu() const;

@@ -228,33 +228,33 @@ public:
    *
    * It will create a new vector, and copy data into it.
   */
-  static Vector* create(const std::vector<real>& data, bool useGpu = false);
+  static Vector* create(const std::vector<float>& data, bool useGpu = false);
 
   /**
   * Create Cpu Vector from numpy array, which dtype=float32
   *
   * If copy is false, it will create vector inplace.
   */
-  static Vector* createCpuVectorFromNumpy(real* data, int dim,
+  static Vector* createCpuVectorFromNumpy(float* data, int dim,
                                           bool copy = false);
 
   /// Create Gpu Vector from numpy array, which dtype=float32
-  static Vector* createGpuVectorFromNumpy(real* data, int dim);
+  static Vector* createGpuVectorFromNumpy(float* data, int dim);
 
   /// Cast to numpy array inplace.
-  void toNumpyArrayInplace(real** view_data, int* dim1) throw(UnsupportError);
+  void toNumpyArrayInplace(float** view_data, int* dim1) throw(UnsupportError);
 
   /// Copy to numpy array.
-  void copyToNumpyArray(real** view_m_data, int* dim1);
+  void copyToNumpyArray(float** view_m_data, int* dim1);
 
   /// Copy from numpy array.
-  void copyFromNumpyArray(real* data, int dim);
+  void copyFromNumpyArray(float* data, int dim);
 
   /// __getitem__ in python
-  real get(const size_t idx) const throw(RangeError, UnsupportError);
+  float get(const size_t idx) const throw(RangeError, UnsupportError);
 
   /// __setitem__ in python
-  void set(const size_t idx, real val) throw(RangeError, UnsupportError);
+  void set(const size_t idx, float val) throw(RangeError, UnsupportError);
 
   /// Return is GPU vector or not.
   bool isGpu() const;
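FloatArray again wraps a const float* buffer. A hypothetical consumer of Matrix::getData(), written only against the declarations above, would respect the needFree flag that Matrix.cpp sets when it returns a heap copy of GPU data:

// Assumes PaddleAPI.h (above) is included; it provides Matrix and FloatArray.
float sumOfElements(const Matrix& m) {
  FloatArray arr = m.getData();        // const float* buf plus length
  float sum = 0.0f;
  for (size_t i = 0; i < arr.length; ++i) {
    sum += arr.buf[i];
  }
  if (arr.needFree) {
    delete[] arr.buf;                  // the GPU path allocates a new float[] copy
  }
  return sum;
}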
paddle/api/Util.cpp

@@ -31,13 +31,13 @@ void initPaddle(int argc, char** argv) {
       feenableexcept(FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW);
 }
 
-FloatArray::FloatArray(const real* b, const size_t l)
+FloatArray::FloatArray(const float* b, const size_t l)
     : buf(b), length(l), needFree(false) {}
 
 IntArray::IntArray(const int* b, const size_t l, bool f)
     : buf(b), length(l), needFree(f) {}
 
-IntWithFloatArray::IntWithFloatArray(const real* v, const int* i, size_t l,
+IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}
paddle/api/Vector.cpp

@@ -140,7 +140,7 @@ struct VectorPrivate {
   paddle::VectorPtr vec;
 
   void safeAccessData(const size_t idx,
-                      const std::function<void(real&)>& func) const
+                      const std::function<void(float&)>& func) const
       throw(RangeError, UnsupportError) {
     auto cpuVec = std::dynamic_pointer_cast<const paddle::CpuVector>(vec);
     if (cpuVec != nullptr) {

@@ -170,7 +170,7 @@ Vector* Vector::createZero(size_t sz, bool useGpu) {
   return retVec;
 }
 
-Vector* Vector::create(const std::vector<real>& data, bool useGpu) {
+Vector* Vector::create(const std::vector<float>& data, bool useGpu) {
   auto retVec = new Vector();
   retVec->m->vec = paddle::Vector::create(data.size(), useGpu);
   retVec->m->vec->copyFrom(data.data(), data.size());

@@ -188,7 +188,7 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
   }
 }
 
-Vector* Vector::createCpuVectorFromNumpy(real* data, int dim, bool copy) {
+Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
   if (copy) {

@@ -200,7 +200,7 @@ Vector* Vector::createCpuVectorFromNumpy(real* data, int dim, bool copy) {
   return retVec;
 }
 
-Vector* Vector::createGpuVectorFromNumpy(real* data, int dim) {
+Vector* Vector::createGpuVectorFromNumpy(float* data, int dim) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
   retVec->m->vec = paddle::Vector::create((size_t)dim, true);

@@ -208,7 +208,7 @@ Vector* Vector::createGpuVectorFromNumpy(real* data, int dim) {
   return retVec;
 }
 
-void Vector::toNumpyArrayInplace(real** view_data,
+void Vector::toNumpyArrayInplace(float** view_data,
                                  int* dim1) throw(UnsupportError) {
   auto v = std::dynamic_pointer_cast<paddle::CpuVector>(m->vec);
   if (v != nullptr) {

@@ -219,20 +219,20 @@ void Vector::toNumpyArrayInplace(real** view_data,
   }
 }
 
-void Vector::copyToNumpyArray(real** view_m_data, int* dim1) {
+void Vector::copyToNumpyArray(float** view_m_data, int* dim1) {
   *dim1 = m->vec->getSize();
-  *view_m_data = new real[*dim1];
+  *view_m_data = new float[*dim1];
   if (auto cpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
-    std::memcpy(*view_m_data, cpuVec->getData(), sizeof(real) * (*dim1));
+    std::memcpy(*view_m_data, cpuVec->getData(), sizeof(float) * (*dim1));
  } else if (auto gpuVec = dynamic_cast<paddle::CpuVector*>(m->vec.get())) {
     hl_memcpy_device2host(*view_m_data, gpuVec->getData(),
-                          sizeof(real) * (*dim1));
+                          sizeof(float) * (*dim1));
   } else {
     LOG(INFO) << "Unexpected situation";
   }
 }
 
-void Vector::copyFromNumpyArray(real* data, int dim) {
+void Vector::copyFromNumpyArray(float* data, int dim) {
   m->vec->resize(dim);
   m->vec->copyFrom(data, dim);
 }

@@ -241,15 +241,15 @@ bool Vector::isGpu() const {
   return std::dynamic_pointer_cast<paddle::GpuVector>(m->vec) != nullptr;
 }
 
-real Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
-  real r;
-  m->safeAccessData(idx, [&](real& o) { r = o; });
+float Vector::get(const size_t idx) const throw(RangeError, UnsupportError) {
+  float r;
+  m->safeAccessData(idx, [&](float& o) { r = o; });
   return r;
 }
 
-void Vector::set(const size_t idx, real val) throw(RangeError,
+void Vector::set(const size_t idx, float val) throw(RangeError,
                                                     UnsupportError) {
-  m->safeAccessData(idx, [&](real& o) { o = val; });
+  m->safeAccessData(idx, [&](float& o) { o = val; });
 }
 
 size_t Vector::getSize() const { return m->vec->getSize(); }
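The Vector side mirrors the Matrix changes. A hypothetical round trip through the numpy-oriented helpers, again based only on the signatures in this diff (the buffer contents are made up for illustration):

// Assumes PaddleAPI.h is included, which declares Vector.
void vectorExample() {
  float raw[4] = {0.0f, 1.0f, 2.0f, 3.0f};
  Vector* v = Vector::createCpuVectorFromNumpy(raw, 4, /*copy=*/true);
  v->set(2, 7.5f);                       // float, not paddle::real
  float x = v->get(2);

  float* out = nullptr;
  int dim = 0;
  v->copyToNumpyArray(&out, &dim);       // allocates a new float[dim] and copies into it
  delete[] out;
  (void)x;  // ownership of v is left out of this sketch
}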