PaddlePaddle / Paddle-Lite

Commit e1351e5e

Merge branch 'develop' into develop

Authored Jul 10, 2018 by eclipsycn; committed via GitHub on Jul 10, 2018.
Parents: 6c86e48c, cd30eb8a
Showing 9 changed files with 1329 additions and 627 deletions (+1329 -627).
CMakeLists.txt  (+0 -1)
demo/android/PaddleMobile_Android/app/src/main/java/com/baidu/paddle/MainActivity.java  (+8 -1)
demo/android/PaddleMobile_Android/app/src/main/java/com/baidu/paddle/PML.java  (+8 -0)
src/jni/paddle_mobile_jni.cpp  (+9 -0)
src/jni/paddle_mobile_jni.h  (+6 -1)
src/operators/math/gemm.cpp  (+1224 -590)
src/operators/math/gemm.h  (+55 -21)
src/operators/math/math_function.cpp  (+13 -13)
src/operators/math/math_function.h  (+6 -0)
CMakeLists.txt

@@ -9,7 +9,6 @@ option(LOG_PROFILE "log profile" ON)
 option(CPU "armv7 with neon" ON)
 option(MALI_GPU "mali gpu" OFF)
 option(FPGA "fpga" OFF)
-set(DEBUGING ON)
 if (ARM_LINUX)
   include("${CMAKE_CURRENT_LIST_DIR}/tools/arm-platform.cmake")
demo/android/PaddleMobile_Android/app/src/main/java/com/baidu/paddle/MainActivity.java

@@ -121,7 +121,14 @@ public class MainActivity extends Activity {
                 String assetPath = "pml_demo";
                 String sdcardPath = Environment.getExternalStorageDirectory()
                         + File.separator + assetPath + File.separator + type;
-                PML.load(sdcardPath);
+                //PML.load(sdcardPath);
+                String modelPath = Environment.getExternalStorageDirectory()
+                        + File.separator + assetPath + File.separator
+                        + "googlenet_combine" + File.separator + "model";
+                String paramPath = Environment.getExternalStorageDirectory()
+                        + File.separator + assetPath + File.separator
+                        + "googlenet_combine" + File.separator + "params";
+                PML.loadCombined(modelPath, paramPath);
             }
         });
demo/android/PaddleMobile_Android/app/src/main/java/com/baidu/paddle/PML.java

@@ -8,6 +8,14 @@ public class PML {
      */
     public static native boolean load(String modelPath);
 
+    /**
+     * Load
+     * @param modelPath
+     * @param paramPath
+     * @return
+     */
+    public static native boolean loadCombined(String modelPath,
+                                              String paramPath);
 
     /**
      * object detection
src/jni/paddle_mobile_jni.cpp

@@ -60,6 +60,15 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
                                                           optimize);
 }
 
+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
+    JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath) {
+  ANDROIDLOGI("load invoked");
+  bool optimize = true;
+  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                         jstring2cppstring(env, paramPath),
+                                         optimize);
+}
+
 JNIEXPORT jfloatArray JNICALL
 Java_com_baidu_paddle_PML_predict(JNIEnv *env, jclass thiz, jfloatArray buf) {
   jfloatArray result = NULL;
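The loadCombined entry point above reuses a jstring2cppstring helper defined elsewhere in paddle_mobile_jni.cpp. For orientation only, here is a minimal sketch of what such a JNI string conversion typically looks like; this is an assumption about the helper, not the file's actual implementation:

#include <jni.h>
#include <string>

// Convert a Java jstring to a std::string via the JNI UTF-8 accessors.
std::string jstring2cppstring(JNIEnv *env, jstring jstr) {
  const char *cstr = env->GetStringUTFChars(jstr, NULL);  // pin UTF-8 bytes
  std::string result(cstr ? cstr : "");                   // copy them out
  if (cstr) env->ReleaseStringUTFChars(jstr, cstr);       // release the pin
  return result;
}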
src/jni/paddle_mobile_jni.h

@@ -22,11 +22,16 @@ extern "C" {
 namespace paddle_mobile {
 namespace jni {
 /**
- * load model & params of the net for android
+ * load separated model for android
  */
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
                                                           jclass thiz,
                                                           jstring modelPath);
 
+/**
+ * load combined model for android
+ */
+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
+    JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath);
+
 /**
  * object detection for anroid
src/operators/math/gemm.cpp

This diff is collapsed (+1224 -590).
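Since the gemm.cpp diff is collapsed, the following is a minimal sketch of how a blocked SGEMM driver typically composes the packing, inner-kernel, and write-back declarations shown in gemm.h below. The loop structure, the SgemmOutline name, and the block-size choices are assumptions about the technique, not the commit's actual code:

// Hypothetical outline of a blocked SGEMM: C = alpha * A * B + beta * C.
// A is partitioned into mc x kc panels and B into kc x nc panels; each pair
// of packed panels is multiplied by an inner kernel built from 4x4 / 4x8
// micro-tiles, and an epilogue (relu or not) writes the accumulator into C.
void SgemmOutline(int m, int n, int k, float alpha, const float *A, int lda,
                  const float *B, int ldb, float beta, float *C, int ldc,
                  bool relu) {
  const int MC = 128, KC = 128, NC = 1024;  // illustrative block sizes
  for (int jc = 0; jc < n; jc += NC) {
    int nc = (n - jc) < NC ? (n - jc) : NC;
    for (int pc = 0; pc < k; pc += KC) {
      int kc = (k - pc) < KC ? (k - pc) : KC;
      // pack B(pc:pc+kc, jc:jc+nc) into a contiguous buffer b, then:
      for (int ic = 0; ic < m; ic += MC) {
        int mc = (m - ic) < MC ? (m - ic) : MC;
        // pack A(ic:ic+mc, pc:pc+kc) into a contiguous buffer a, then:
        // InnerKernel(mc, nc, alpha, a, b, beta, c, &C[ic * ldc + jc], ldc, relu);
      }
    }
  }
}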
src/operators/math/gemm.h

@@ -19,12 +19,8 @@ limitations under the License. */
 #define B(i, j) B[(i)*ldb + (j)]
 #define C(i, j) C[(i)*ldc + (j)]
 
-// Block sizes for the blocked computation; mc and kc correspond to the m and k of a block
-#define MC 128
-#define KC 128
-#define NC 1024
 #define MR 4
-#define NR 4
+#define NR 8
 
 #define s_min(i, j) ((i) < (j) ? (i) : (j))

@@ -49,28 +45,66 @@ void PackMatrixB_(int k, int n, int n_tail, const float *B, int ldb,
                   float *buffer);
 
 // Blocked matrix multiplication
-void InnerKernel(int m, int n, int k, float alpha, const float *A, int lda,
-                 const float *B, int ldb, float beta, float *C, int ldc,
-                 int first_time);
+void InnerKernel(int mc, int nc, float alpha, const float *a, const float *b,
+                 float beta, float *c, float *C, int ldc, bool relu);
+void InnerKernelWithBn(int mc, int nc, float alpha, const float *a,
+                       const float *b, float beta, float *c, float *C, int ldc,
+                       bool relu, float *new_scale, float *new_bias);
 
 // Vector-matrix multiplication (M = 1)
 void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda,
-                  const float *B, int ldb, float beta, float *C, int ldc);
+                  const float *B, int ldb, float beta, float *C, int ldc,
+                  bool relu);
+void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
+                        int lda, const float *B, int ldb, float beta, float *C,
+                        int ldc, bool relu, float *new_scale, float *new_bias);
 
-// Compute a smaller 4 * 4 block of the C matrix
-void AddDot4x4(int k, float alpha, const float *A, int lda, const float *B,
-               int ldb, float beta, float *C, int ldc, int mc, int nc);
-void AddDot4x4_relu(int k, float alpha, const float *a, int lda,
-                    const float *b, int ldb, float beta, float *C, int ldc,
-                    int mc, int nc, bool relu);
+// Compute a smaller block of the C matrix
+void AddDot4x4(int k, const float *a, const float *b, float *c, int ldc);
+void AddDot4x8(int k, const float *a, const float *b, float *c, int ldc);
+
+// Write back the results of the blocked matrix multiplication
+// C = A * B
+void WriteBasic(int mc, int nc, float *c, float *C, int ldc);
+// C = alpha * A * B + beta * C
+void WriteWithAlphaBeta(int mc, int nc, float *c, float *C, int ldc);
+// C = A * B + C
+void WriteWithAdd(int mc, int nc, float *c, float *C, int ldc);
+// C = A * B + C, relu(C)
+void WriteWithAddRelu(int mc, int nc, float *c, float *C, int ldc);
+// C = A * B, batchnorm(C)
+void WriteWithBn(int mc, int nc, float *c, float *C, int ldc,
+                 float *new_scale, float *new_bias);
+// C = A * B, batchnorm(C), relu(C)
+void WriteWithBnRelu(int mc, int nc, float *c, float *C, int ldc,
+                     float *new_scale, float *new_bias);
+
+// Write back the results of the vector-matrix multiplication
+// C = A * B
+void VecWriteBasic(int n, float *c, float *C, int ldc);
+// C = alpha * A * B + beta * C
+void VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc);
+// C = A * B + C
+void VecWriteWithAdd(int n, float *c, float *C, int ldc);
+// C = A * B + C, relu(C)
+void VecWriteWithAddRelu(int n, float *c, float *C, int ldc);
+// C = A * B, batchnorm(C)
+void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale,
+                    float *new_bias);
+// C = A * B, batchnorm(C), relu(C)
+void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float *new_scale,
+                        float *new_bias);
 
 // 32-bit float matrix multiplication
-void sgemm(int m, int n, int k, float alpha, const float *A, int lda,
-           const float *B, int ldb, float beta, float *C, int ldc);
-void sgemm_relu(int m, int n, int k, float alpha, const float *A, int lda,
-                const float *B, int ldb, float beta, float *C, int ldc);
+void Sgemm(int m, int n, int k, float alpha, const float *A, int lda,
+           const float *B, int ldb, float beta, float *C, int ldc, bool relu);
+// 32-bit float matrix multiplication, applying batchnorm to the result
+void SgemmWithBn(int m, int n, int k, float alpha, const float *A, int lda,
+                 const float *B, int ldb, float beta, float *C, int ldc,
+                 bool relu, float *new_scale, float *new_bias);
 
 // 64-bit double matrix multiplication
 void dgemm(int m, int n, int k, float alpha, const double *A, int lda,
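As a reading aid for the declarations above, here is a naive, non-NEON reference for what the new 4x4 micro-kernel and the batchnorm write-back compute. The packed panel layout, the accumulator stride, and the per-row scale/bias are assumptions inferred from the signatures, not the actual gemm.cpp kernels:

// AddDot4x4 reference: accumulate a 4x4 tile of the output from packed
// panels a (k steps of 4 values from A) and b (k steps of 4 values from B).
void AddDot4x4_ref(int k, const float *a, const float *b, float *c, int ldc) {
  for (int p = 0; p < k; ++p) {
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        c[i * ldc + j] += a[4 * p + i] * b[4 * p + j];
      }
    }
  }
}

// WriteWithBn reference: C = batchnorm(A * B). Each row i of the mc x nc
// accumulator c is scaled and shifted while being written back into C.
void WriteWithBn_ref(int mc, int nc, const float *c, float *C, int ldc,
                     const float *new_scale, const float *new_bias) {
  for (int i = 0; i < mc; ++i) {
    for (int j = 0; j < nc; ++j) {
      C[i * ldc + j] = c[i * nc + j] * new_scale[i] + new_bias[i];
    }
  }
}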
src/operators/math/math_function.cpp

@@ -39,22 +39,18 @@ void matmul<float>(const framework::Tensor &matrix_a, bool trans_a,
   int M = dim_out[0];
   int N = dim_out[1];
-  int K = (trans_a == false) ? dim_a[1] : dim_a[0];
+  int K = (!trans_a) ? dim_a[1] : dim_a[0];
 
-  if (relu) {
-    sgemm_relu(M, N, K, alpha, matrix_a.data<float>(), K,
-               matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N);
-  } else {
-    sgemm(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(),
-          N, beta, matrix_out->data<float>(), N);
-  }
+  Sgemm(M, N, K, alpha, matrix_a.data<float>(), K, matrix_b.data<float>(), N,
+        beta, matrix_out->data<float>(), N, relu);
 }
 
 template <>
-void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
-                    const framework::Tensor &matrix_b, bool trans_b,
-                    double alpha, framework::Tensor *matrix_out, double beta,
-                    bool relu) {
+void matmulWithBn<float>(const framework::Tensor &matrix_a, bool trans_a,
+                         const framework::Tensor &matrix_b, bool trans_b,
+                         float alpha, framework::Tensor *matrix_out,
+                         float beta, bool relu, framework::Tensor *new_scale,
+                         framework::Tensor *new_bias) {
   auto dim_a = matrix_a.dims();
   auto dim_b = matrix_b.dims();
   auto dim_out = matrix_out->dims();

@@ -71,7 +67,11 @@ void matmul<double>(const framework::Tensor &matrix_a, bool trans_a,
   int M = dim_out[0];
   int N = dim_out[1];
-  int K = (trans_a == false) ? dim_a[1] : dim_a[0];
+  int K = (!trans_a) ? dim_a[1] : dim_a[0];
 
+  SgemmWithBn(M, N, K, alpha, matrix_a.data<float>(), K,
+              matrix_b.data<float>(), N, beta, matrix_out->data<float>(), N,
+              relu, new_scale->data<float>(), new_bias->data<float>());
 }
 
 }  // namespace math
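The new_scale / new_bias tensors are what let a following batchnorm layer be fused into the GEMM write-back. How they are derived is not part of this commit; a standard folding (shown here as an assumption, with a hypothetical FoldBatchNorm helper) combines the batchnorm parameters into one per-channel scale and bias:

#include <cmath>

// Fold batchnorm parameters (gamma, beta, mean, variance, epsilon) into
// new_scale / new_bias so the epilogue can compute
//   C = (A * B) * new_scale + new_bias
// which equals batchnorm(A * B) per output channel.
void FoldBatchNorm(int channels, const float *gamma, const float *beta,
                   const float *mean, const float *variance, float epsilon,
                   float *new_scale, float *new_bias) {
  for (int c = 0; c < channels; ++c) {
    float inv_std = 1.0f / std::sqrt(variance[c] + epsilon);
    new_scale[c] = gamma[c] * inv_std;
    new_bias[c] = beta[c] - mean[c] * gamma[c] * inv_std;
  }
}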
src/operators/math/math_function.h

@@ -26,6 +26,12 @@ template <typename T>
 void matmul(const framework::Tensor &matrix_a, bool trans_a,
             const framework::Tensor &matrix_b, bool trans_b, T alpha,
             framework::Tensor *matrix_out, T beta, bool relu = false);
 
+template <typename T>
+void matmulWithBn(const framework::Tensor &matrix_a, bool trans_a,
+                  const framework::Tensor &matrix_b, bool trans_b, T alpha,
+                  framework::Tensor *matrix_out, T beta, bool relu,
+                  framework::Tensor *new_scale, framework::Tensor *new_bias);
+
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle_mobile