PaddlePaddle / PaddleDetection
Commit 2be3a747
Authored on Nov 07, 2016 by wangyang59
Modified API to use FLAGS_use_gpu as useGpu default value
Parent: 0ba0f02c
Showing 5 changed files with 67 additions and 10 deletions
paddle/api/Matrix.cpp    +11 -0
paddle/api/Paddle.swig    +8 -1
paddle/api/PaddleAPI.h   +23 -8
paddle/api/Util.cpp       +2 -0
paddle/api/Vector.cpp    +23 -1
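For orientation before the per-file diffs: the effect of this commit is that every `useGpu` parameter in the C++ API now defaults to `isUseGpu()` (the current value of `FLAGS_use_gpu`) instead of `false`. A minimal sketch of what that means for a caller, assuming the header is reachable as `paddle/api/PaddleAPI.h` and that `initPaddle` forwards the command line (including `--use_gpu`) to the flag parser; none of this caller code is part of the commit:

```cpp
#include "paddle/api/PaddleAPI.h"

// Sketch only: with this commit, omitting useGpu makes the factories follow
// FLAGS_use_gpu; passing it explicitly still overrides the flag.
void buildContainers(int argc, char** argv) {
  initPaddle(argc, argv);                 // assumed to parse --use_gpu

  Matrix* m = Matrix::createZero(4, 4);   // device follows FLAGS_use_gpu
  Vector* v = Vector::createZero(16);     // same default for vectors
  IVector* ids = IVector::createZero(16, /*useGpu=*/false);  // explicit CPU

  (void)m; (void)v; (void)ids;            // ownership/cleanup omitted here
}
```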
paddle/api/Matrix.cpp
@@ -52,6 +52,17 @@ Matrix* Matrix::createDense(const std::vector<float>& data, size_t height,
   return m;
 }
 
+Matrix* Matrix::createDenseFromNumpy(float* data, int dim1, int dim2,
+                                     bool copy, bool useGpu) {
+  if (useGpu) {
+    /// Gpu mode only supports copy=True
+    CHECK(copy);
+    return Matrix::createGpuDenseFromNumpy(data, dim1, dim2);
+  } else {
+    return Matrix::createCpuDenseFromNumpy(data, dim1, dim2, copy);
+  }
+}
+
 Matrix* Matrix::createCpuDenseFromNumpy(float* data, int dim1, int dim2,
                                         bool copy) {
   auto m = new Matrix();
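The new `Matrix::createDenseFromNumpy` above is a thin dispatcher: on the GPU path it requires a copy (`CHECK(copy)`), on the CPU path it honours `copy`. A hedged usage sketch, assuming the header is included as `paddle/api/PaddleAPI.h`; the buffer shape is made up for illustration:

```cpp
#include "paddle/api/PaddleAPI.h"

Matrix* wrapBuffer(float* buf /* 2 x 3, row-major, float32 */) {
  // copy defaults to true and the device follows FLAGS_use_gpu; a copy is the
  // only mode the GPU branch accepts (see CHECK(copy) above).
  Matrix* copied = Matrix::createDenseFromNumpy(buf, /*dim1=*/2, /*dim2=*/3);

  // A zero-copy view only makes sense on the CPU, so pin useGpu to false when
  // passing copy=false; under --use_gpu=true the default would trip the CHECK.
  Matrix* view = Matrix::createDenseFromNumpy(buf, 2, 3, /*copy=*/false,
                                              /*useGpu=*/false);
  (void)view;
  return copied;  // the caller is responsible for the returned object
}
```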
paddle/api/Paddle.swig
@@ -133,14 +133,21 @@ namespace std {
 %newobject Matrix::createZero;
 %newobject Matrix::createSparse;
 %newobject Matrix::createDense;
+%newobject Matrix::createDenseFromNumpy;
 %newobject Matrix::createCpuDenseFromNumpy;
 %newobject Matrix::createGpuDenseFromNumpy;
 %newobject Vector::createZero;
 %newobject Vector::create;
+%newobject Vector::createVectorFromNumpy;
 %newobject Vector::createCpuVectorFromNumpy;
 %newobject Vector::createGpuVectorFromNumpy;
 %newobject IVector::createZero;
 %newobject IVector::create;
+%newobject IVector::createVectorFromNumpy;
 %newobject IVector::createCpuVectorFromNumpy;
 %newobject IVector::createGpuVectorFromNumpy;
 %newobject Trainer::createByCommandLine;
-%newobject Trainer::getNetworkOutput;
+%newobject Trainer::getForwardOutput;
 %newobject Trainer::getLayerOutput;
 %newobject Arguments::getSlotValue;
 %newobject Arguments::getSlotIds;
paddle/api/PaddleAPI.h
@@ -42,6 +42,9 @@ using namespace paddle::enumeration_wrapper; // NOLINT
  */
 void initPaddle(int argc, char** argv);
 
+/// Return FLAGS_use_gpu
+bool isUseGpu();
+
 /// Return true if this py_paddle is compiled in GPU Version
 bool isGpuVersion();

@@ -101,7 +104,8 @@ public:
   /**
    * Create A Matrix with height,width, which is filled by zero.
    */
-  static Matrix* createZero(size_t height, size_t width, bool useGpu = false);
+  static Matrix* createZero(size_t height, size_t width,
+                            bool useGpu = isUseGpu());
 
   /**
    * Create Sparse Matrix.

@@ -114,7 +118,7 @@ public:
    */
   static Matrix* createSparse(size_t height, size_t width, size_t nnz,
                               bool isNonVal = true, bool trans = false,
-                              bool useGpu = false);
+                              bool useGpu = isUseGpu());
 
   /**
    * Create Dense Matrix.

@@ -123,7 +127,11 @@ public:
    * @note the value will be copy into a new matrix.
    */
   static Matrix* createDense(const std::vector<float>& data, size_t height,
-                             size_t width, bool useGpu = false);
+                             size_t width, bool useGpu = isUseGpu());
+
+  static Matrix* createDenseFromNumpy(float* data, int dim1, int dim2,
+                                      bool copy = true,
+                                      bool useGpu = isUseGpu());
 
   /**
    * Create Cpu Dense Matrix from numpy matrix, dtype=float32

@@ -221,15 +229,18 @@ public:
   ~Vector();
 
   /// Create Vector filled with zero.
-  static Vector* createZero(size_t sz, bool useGpu = false);
+  static Vector* createZero(size_t sz, bool useGpu = isUseGpu());
 
   /**
    * Create Vector from list of float.
    *
    * It will create a new vector, and copy data into it.
    */
-  static Vector* create(const std::vector<float>& data, bool useGpu = false);
+  static Vector* create(const std::vector<float>& data,
+                        bool useGpu = isUseGpu());
+
+  static Vector* createVectorFromNumpy(float* data, int dim, bool copy = true,
+                                       bool useGpu = isUseGpu());
 
   /**
    * Create Cpu Vector from numpy array, which dtype=float32
    *

@@ -279,13 +290,17 @@ class IVector {
 public:
   /// Create IVector filled with zero
-  static IVector* createZero(size_t sz, bool useGpu = false);
+  static IVector* createZero(size_t sz, bool useGpu = isUseGpu());
 
   /**
    * Create IVector from list of int.
    * It will create a new vector, and copy data into it.
    */
-  static IVector* create(const std::vector<int>& data, bool useGpu = false);
+  static IVector* create(const std::vector<int>& data,
+                         bool useGpu = isUseGpu());
+
+  static IVector* createVectorFromNumpy(int* data, int dim,
+                                        bool copy = true,
+                                        bool useGpu = isUseGpu());
 
   /**
    * Create Cpu IVector from numpy array, which dtype=int32

@@ -297,7 +312,7 @@ public:
   /**
    * Create Gpu IVector from numpy array, which dtype=int32
    */
-  static IVector* createGpuVectorFromNumy(int* data, int dim);
+  static IVector* createGpuVectorFromNumpy(int* data, int dim);
 
   /// Cast to numpy array inplace.
   void toNumpyArrayInplace(int** view_data, int* dim1) throw (UnsupportError);
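One point worth spelling out about these header changes: a default argument such as `bool useGpu = isUseGpu()` is re-evaluated at every call site, so each call sees the value `FLAGS_use_gpu` has at that moment rather than a value captured when the library was loaded. A standalone sketch of that C++ rule (not Paddle code; all names here are illustrative):

```cpp
#include <iostream>

static bool g_use_gpu = false;             // stand-in for FLAGS_use_gpu
bool isUseGpuDemo() { return g_use_gpu; }  // stand-in for isUseGpu()

// The default-argument expression runs again on every call.
void createSomething(bool useGpu = isUseGpuDemo()) {
  std::cout << (useGpu ? "gpu" : "cpu") << "\n";
}

int main() {
  createSomething();   // prints "cpu"
  g_use_gpu = true;    // e.g. flag parsing flips the value later
  createSomething();   // prints "gpu" -- the default tracked the change
  return 0;
}
```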
paddle/api/Util.cpp
@@ -41,6 +41,8 @@ IntWithFloatArray::IntWithFloatArray(const float* v, const int* i, size_t l,
                                      bool f)
     : valBuf(v), idxBuf(i), length(l), needFree(f) {}
 
+bool isUseGpu() {return FLAGS_use_gpu;}
+
 bool isGpuVersion() {
 #ifdef PADDLE_ONLY_CPU
   return false;
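`isUseGpu()` simply exposes `FLAGS_use_gpu`, a command-line flag defined elsewhere in Paddle and not shown in this diff. For readers unfamiliar with the flag mechanism, a self-contained sketch of the usual gflags pattern; the definition below is illustrative and does not claim to match Paddle's actual flag-declaration macros:

```cpp
#include <gflags/gflags.h>
#include <cstdio>

// Illustrative stand-in; Paddle defines the real use_gpu flag elsewhere.
DEFINE_bool(use_gpu, false, "Whether to run computations on the GPU");

// Mirrors the accessor added in paddle/api/Util.cpp.
bool isUseGpuDemo() { return FLAGS_use_gpu; }

int main(int argc, char** argv) {
  // e.g. ./demo --use_gpu=true
  gflags::ParseCommandLineFlags(&argc, &argv, /*remove_flags=*/true);
  std::printf("use_gpu = %s\n", isUseGpuDemo() ? "true" : "false");
  return 0;
}
```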
paddle/api/Vector.cpp
@@ -39,6 +39,17 @@ IVector* IVector::create(const std::vector<int>& data, bool useGpu) {
   return v;
 }
 
+IVector* IVector::createVectorFromNumpy(int* data, int dim, bool copy,
+                                        bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=true is supported
+    CHECK(copy);
+    return IVector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return IVector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   auto v = new IVector();
   if (copy) {

@@ -50,7 +61,7 @@ IVector* IVector::createCpuVectorFromNumpy(int* data, int dim, bool copy) {
   return v;
 }
 
-IVector* IVector::createGpuVectorFromNumy(int* data, int dim) {
+IVector* IVector::createGpuVectorFromNumpy(int* data, int dim) {
   auto v = new IVector();
   v->m->vec = paddle::IVector::create(dim, true);
   v->m->vec->copyFrom(data, dim);

@@ -188,6 +199,17 @@ Vector* Vector::createByPaddleVectorPtr(void* ptr) {
   }
 }
 
+Vector* Vector::createVectorFromNumpy(float* data, int dim, bool copy,
+                                      bool useGpu) {
+  if (useGpu) {
+    /// if use gpu only copy=True is supported
+    CHECK(copy);
+    return Vector::createGpuVectorFromNumpy(data, dim);
+  } else {
+    return Vector::createCpuVectorFromNumpy(data, dim, copy);
+  }
+}
+
 Vector* Vector::createCpuVectorFromNumpy(float* data, int dim, bool copy) {
   CHECK_GT(dim, 0);
   auto retVec = new Vector();
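As with the Matrix dispatcher, the vector wrappers above only allow `copy=false` on the CPU path; with `--use_gpu=true` and the new `isUseGpu()` default, `CHECK(copy)` would abort on a zero-copy request. A hedged sketch, assuming `paddle/api/PaddleAPI.h`, of how a caller can stay safe regardless of the flag:

```cpp
#include "paddle/api/PaddleAPI.h"

IVector* wrapLabels(int* labels, int n) {
  // Zero-copy views are CPU-only, so pin useGpu to false explicitly rather
  // than relying on the FLAGS_use_gpu-driven default.
  return IVector::createVectorFromNumpy(labels, n, /*copy=*/false,
                                        /*useGpu=*/false);
}

Vector* wrapValues(float* values, int n) {
  // Copying is always legal, so the device can follow FLAGS_use_gpu.
  return Vector::createVectorFromNumpy(values, n);
}
```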