PaddlePaddle / Paddle-Lite

Commit c70d0f9b

Authored Aug 02, 2018 by Ruilong Liu
Committed by GitHub on Aug 02, 2018
Merge pull request #682 from xiebaiyuan/develop
fix #677 ---> add android jni for load qualified models && fix bugs …
Parents: c2034998, c2d9b6f2
Showing 5 changed files with 54 additions and 21 deletions (+54 −21):

    .gitignore                          +2   −1
    src/io/loader.cpp                   +2   −1
    src/jni/paddle_mobile_jni.cpp       +19  −0
    src/jni/paddle_mobile_jni.h         +12  −0
    tools/quantification/convert.cpp    +19  −19
.gitignore
@@ -75,4 +75,5 @@ cmake-build-release
 demo/ios/PaddleMobileDemo/PaddleMobileDemo/googlenet_combine/
 demo/ios/PaddleMobileDemo/PaddleMobileDemo/*.jpg
 demo/ios/PaddleMobileDemo/PaddleMobileDemo/PaddleMobile/*.a
-*.xcuserstate
\ No newline at end of file
+*.xcuserstate
+/tools/quantification/quantify
src/io/loader.cpp
@@ -56,7 +56,8 @@ template <typename Dtype, Precision P>
 const framework::Program<Dtype, P> Loader<Dtype, P>::Load(
     const std::string &model_path, const std::string &para_path,
     bool optimize, bool quantification) {
-  auto program = this->LoadProgram(model_path, optimize);
+  auto program =
+      this->LoadProgram(model_path, optimize, quantification);
   program.para_path = para_path;
   program.combined = true;
   program.quantification = quantification;
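The `quantification` flag was already part of `Load`'s signature and is stored on the returned program, but the old body dropped it when delegating to `LoadProgram`; the fix forwards it so combined-model loading knows the parameter file is quantified. A hypothetical call site (the template arguments and paths are illustrative assumptions, not taken from this commit):

    // Sketch: load a combined model whose parameter file was produced by
    // the quantification tool. CPU / Precision::FP32 are assumed defaults.
    paddle_mobile::Loader<paddle_mobile::CPU, paddle_mobile::Precision::FP32> loader;
    auto program = loader.Load("/data/local/tmp/model", "/data/local/tmp/params",
                               /*optimize=*/true, /*quantification=*/true);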
src/jni/paddle_mobile_jni.cpp
@@ -61,6 +61,15 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
                                          optimize);
 }

+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadQualified(
+    JNIEnv *env, jclass thiz, jstring modelPath) {
+  ANDROIDLOGI("loadQualified invoked");
+  bool optimize = true;
+  bool qualified = true;
+  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                         optimize, qualified);
+}
+
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
     JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath) {
   ANDROIDLOGI("loadCombined invoked");
@@ -70,6 +79,16 @@ JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
                                          optimize);
 }

+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombinedQualified(
+    JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath) {
+  ANDROIDLOGI("loadCombinedQualified invoked");
+  bool optimize = true;
+  bool qualified = true;
+  return getPaddleMobileInstance()->Load(
+      jstring2cppstring(env, modelPath), jstring2cppstring(env, paramPath),
+      optimize, qualified);
+}
+
 JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictImage(
     JNIEnv *env, jclass thiz, jfloatArray buf, jintArray ddims) {
   ANDROIDLOGI("predictImage invoked");
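Both new wrappers hard-code `optimize = true` and `qualified = true` and delegate to the singleton returned by `getPaddleMobileInstance()`. They reuse the file's existing `jstring2cppstring` helper; for reference, a minimal sketch of what such a conversion typically looks like (an assumption about the helper, not code from this commit):

    #include <jni.h>
    #include <string>

    // Convert a jstring to std::string via the standard JNI UTF-8 accessors.
    static std::string JstringToCppString(JNIEnv *env, jstring jstr) {
      const char *chars = env->GetStringUTFChars(jstr, nullptr);
      std::string result(chars == nullptr ? "" : chars);
      if (chars != nullptr) {
        env->ReleaseStringUTFChars(jstr, chars);
      }
      return result;
    }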
src/jni/paddle_mobile_jni.h
@@ -27,12 +27,24 @@ namespace jni {
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
                                                           jclass thiz,
                                                           jstring modelPath);

+/**
+ * load separated qualified model for android
+ */
+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadQualified(
+    JNIEnv *env, jclass thiz, jstring modelPath);
+
 /**
  * load combined model for android
  */
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
     JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath);

+/**
+ * load combined qualified model for android
+ */
+JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombinedQualified(
+    JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath);
+
 /**
  * object detection for anroid
  */
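Each exported name follows the standard JNI mangling Java_<package>_<Class>_<method>, so these declarations imply static native methods on a Java class com.baidu.paddle.PML (static because the second parameter is jclass rather than jobject). A sketch of that mapping; the Java declaration is a reconstruction from the symbol name, not code from this commit:

    #include <jni.h>

    // Implied Java side (hypothetical reconstruction):
    //   package com.baidu.paddle;
    //   public class PML {
    //     public static native boolean loadQualified(String modelPath);
    //   }
    //
    // The symbol must keep C linkage so the JVM can resolve it by name;
    // the real header presumably wraps its declarations in extern "C".
    extern "C" JNIEXPORT jboolean JNICALL
    Java_com_baidu_paddle_PML_loadQualified(JNIEnv *env, jclass thiz,
                                            jstring modelPath);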
tools/quantification/convert.cpp
@@ -68,60 +68,60 @@ std::shared_ptr<ProgramDesc> loadParams(const std::string &model_path) {
 }

-void LoadWithDump(const paddle_mobile::framework::VarDesc &var_desc,
-                  char *dataP, FILE *out_file) {
+void LoadWithDump(const paddle_mobile::framework::VarDesc &var_desc,
+                  char **dataP, FILE *out_file) {
   // 1. version
-  uint32_t version = *reinterpret_cast<uint32_t *>(dataP);
+  uint32_t version = *reinterpret_cast<uint32_t *>(*dataP);
   // write version
   fwrite(&version, kSize32, 1, out_file);
-  dataP += kSize32;
+  *dataP += kSize32;
   // 2 Lod information
   auto *lod_level_ptr = new uint64_t();
-  memcpy(lod_level_ptr, dataP, kSize64);
+  memcpy(lod_level_ptr, *dataP, kSize64);
   uint64_t lod_level = 0;
   // write lod Information
   fwrite(&lod_level, kSize64, 1, out_file);
   delete lod_level_ptr;
-  dataP += kSize64;
+  *dataP += kSize64;
   for (uint64_t i = 0; i < lod_level; ++i) {
-    uint64_t size = *reinterpret_cast<uint64_t *>(dataP);
+    uint64_t size = *reinterpret_cast<uint64_t *>(*dataP);
     // write lod size
     fwrite(&size, kSize64, 1, out_file);
-    (dataP) += kSize64;
+    (*dataP) += kSize64;
     std::vector<size_t> tmp(size / sizeof(size_t));
     for (unsigned long &k : tmp) {
-      k = *reinterpret_cast<size_t *>(dataP);
-      (dataP) += sizeof(size_t);
+      k = *reinterpret_cast<size_t *>(*dataP);
+      (*dataP) += sizeof(size_t);
     }
     // write lod size vector
     fwrite(&tmp, sizeof(size_t), tmp.size(), out_file);
   }
   // 3. tensor version
-  uint32_t tensor_version = *reinterpret_cast<uint32_t *>(dataP);
+  uint32_t tensor_version = *reinterpret_cast<uint32_t *>(*dataP);
   // write tensor version
   fwrite(&tensor_version, kSize32, 1, out_file);
-  (dataP) += kSize32;
+  (*dataP) += kSize32;
   // 4. tensor desc
-  int32_t size = *reinterpret_cast<int32_t *>(dataP);
+  int32_t size = *reinterpret_cast<int32_t *>(*dataP);
   // write tensor desc
   fwrite(&size, sizeof(int32_t), 1, out_file);
-  (dataP) += sizeof(int32_t);
+  (*dataP) += sizeof(int32_t);
   std::unique_ptr<char[]> buf(new char[size]);
   for (int m = 0; m < size; ++m) {
-    buf.get()[m] = (dataP)[m];
+    buf.get()[m] = (*dataP)[m];
   }
   fwrite(buf.get(), sizeof(char), static_cast<size_t>(size), out_file);
-  (dataP) += (sizeof(char) * size);
+  (*dataP) += (sizeof(char) * size);
   const paddle_mobile::framework::TensorDesc &desc = var_desc.Tensor_desc();
   int memory_size = 1;
@@ -158,9 +158,9 @@ void LoadWithDump(const paddle_mobile::framework::VarDesc &var_desc, char *dataP
   memory = new char[tensorSize];
   for (int n = 0; n < tensorSize; ++n) {
-    static_cast<char *>(memory)[n] = (dataP)[n];
+    static_cast<char *>(memory)[n] = (*dataP)[n];
   }
-  dataP += tensorSize;
+  *dataP += tensorSize;
   // for float 32
   float min_value = std::numeric_limits<float>::max();
@@ -194,7 +194,7 @@ quantificate_combined(const std::string &model_path, const std::string &param_pa
       if (var_desc->Name() == "feed" || var_desc->Name() == "fetch") {
         continue;
       }
-      LoadWithDump(*var_desc, data, out_file);
+      LoadWithDump(*var_desc, &data, out_file);
     }
   }
 }
@@ -220,7 +220,7 @@ void quantificate_seperated(const std::string model_dir, const std::string param
       FILE *out_file = fopen(file_name.c_str(), "wb");
       char *origin_data = Get_binary_data(model_dir + "/" + var_desc->Name());
      char *data = origin_data;
-      LoadWithDump(*var_desc, data, out_file);
+      LoadWithDump(*var_desc, &data, out_file);
       delete origin_data;
       fclose(out_file);
     }
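This file carries the actual bug fix: LoadWithDump used to take the data cursor by value (char *dataP), so the pointer advances it performed were invisible to its caller, and the per-variable loops in quantificate_combined and quantificate_seperated kept re-reading from a stale offset. Switching to char **dataP (and passing &data at the call sites) makes the advances persist across calls. A self-contained sketch of the pitfall, with illustrative names:

    #include <cstdio>

    // The parameter is a copy: advancing it does not move the caller's cursor.
    static void advance_by_value(char *cursor) { cursor += 4; }

    // A pointer-to-pointer lets the callee advance the caller's cursor,
    // mirroring the fixed LoadWithDump signature.
    static void advance_by_pointer(char **cursor) { *cursor += 4; }

    int main() {
      char buffer[16] = {};
      char *cursor = buffer;

      advance_by_value(cursor);
      std::printf("by value:   offset = %td\n", cursor - buffer);  // prints 0

      advance_by_pointer(&cursor);
      std::printf("by pointer: offset = %td\n", cursor - buffer);  // prints 4
      return 0;
    }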