PaddlePaddle / Paddle-Lite
Commit 9b15bb3f, authored on Aug 21, 2018 by qnqinan

Merge remote-tracking branch 'origin/develop' into develop

Parents: 0a4363b6, 7bece51d

Showing 4 changed files with 262 additions and 26 deletions (+262 / -26):
  src/jni/PML.java                 +63   -0
  src/jni/paddle_mobile_jni.cpp    +195  -24
  src/jni/paddle_mobile_jni.h      +2    -2
  src/operators/feed_op.h          +2    -0
src/jni/PML.java  (new file, mode 0 → 100644)
package com.baidu.paddle;

public class PML {
    /**
     * load separated model
     *
     * @param modelDir model dir
     * @return isloadsuccess
     */
    public static native boolean load(String modelDir);

    /**
     * load combined model
     *
     * @param modelPath model file path
     * @param paramPath param file path
     * @return isloadsuccess
     */
    public static native boolean loadCombined(String modelPath, String paramPath);

    /**
     * load model and qualified params
     *
     * @param modelDir qualified model dir
     * @return isloadsuccess
     */
    public static native boolean loadQualified(String modelDir);

    /**
     * load model and qualified combined params
     *
     * @param modelPath model file path
     * @param paramPath qualified param path
     * @return isloadsuccess
     */
    public static native boolean loadCombinedQualified(String modelPath, String paramPath);

    /**
     * predict image
     *
     * @param buf   buffer of the pretreated image (as your model expects)
     * @param ddims shape of your input
     * @return result
     */
    public static native float[] predictImage(float[] buf, int[] ddims);

    public static native float[] predictYuv(byte[] buf, int imgWidth, int imgHeight,
                                            int[] ddims, float[] meanValues);

    /**
     * clear model data
     */
    public static native void clear();

    /**
     * set thread num when you enable OpenMP
     *
     * @param threadCount threadCount
     */
    public static native void setThread(int threadCount);
}
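
For orientation, a minimal Android-side usage sketch of this new class might look like the following. The native library name, file paths, and input shape are illustrative assumptions, not values taken from this commit.

    // Hypothetical caller-side sketch; paths, dims, and the .so name are assumptions.
    import com.baidu.paddle.PML;

    public class InferenceExample {
        static {
            // The actual library name depends on how the JNI library is built and packaged.
            System.loadLibrary("paddle-mobile");
        }

        public static float[] runOnce(float[] preprocessedImage) {
            // Combined model: one model file plus one param file (example paths).
            boolean ok = PML.loadCombined("/sdcard/pml/model", "/sdcard/pml/params");
            if (!ok) {
                throw new IllegalStateException("model load failed");
            }
            // NCHW shape the model expects; 1x3x224x224 is only an example.
            int[] ddims = {1, 3, 224, 224};
            float[] result = PML.predictImage(preprocessedImage, ddims);
            PML.clear();  // release model data when done
            return result;
        }
    }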
src/jni/paddle_mobile_jni.cpp
@@ -20,6 +20,12 @@ limitations under the License. */
 #include "framework/tensor.h"
 #include "io/paddle_mobile.h"
+#ifdef ENABLE_EXCEPTION
+#include "common/enforce.h"
+#endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
@@ -33,17 +39,10 @@ using std::string;
 extern const char *ANDROID_LOG_TAG =
     "paddle_mobile LOG built on " __DATE__ " " __TIME__;
-static PaddleMobile<CPU> *shared_paddle_mobile_instance = nullptr;
-// static std::mutex shared_mutex;
-PaddleMobile<CPU> *getPaddleMobileInstance() {
-  if (nullptr == shared_paddle_mobile_instance) {
-    shared_paddle_mobile_instance = new PaddleMobile<CPU>();
-  }
-  return shared_paddle_mobile_instance;
-}
+paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
+static std::mutex shared_mutex;
+// toDo mutex lock
+PaddleMobile<CPU> *getPaddleMobileInstance() { return &paddle_mobile; }
 
 string jstring2cppstring(JNIEnv *env, jstring jstr) {
   const char *cstr = env->GetStringUTFChars(jstr, 0);
@@ -55,43 +54,111 @@ string jstring2cppstring(JNIEnv *env, jstring jstr) {
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_load(JNIEnv *env,
                                                           jclass thiz,
                                                           jstring modelPath) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("load invoked");
   bool optimize = true;
-  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
-                                         optimize);
+  bool isLoadOk = false;
+#ifdef ENABLE_EXCEPTION
+  try {
+    isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                               optimize);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+    isLoadOk = false;
+  }
+#else
+  isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                             optimize);
+#endif
+  return static_cast<jboolean>(isLoadOk);
 }
 
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadQualified(
     JNIEnv *env, jclass thiz, jstring modelPath) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("loadQualified invoked");
   bool optimize = true;
   bool qualified = true;
-  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
-                                         optimize, qualified);
+  bool isLoadOk = false;
+#ifdef ENABLE_EXCEPTION
+  try {
+    isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                               optimize, qualified);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+    isLoadOk = false;
+  }
+#else
+  isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                             optimize, qualified);
+#endif
+  return static_cast<jboolean>(isLoadOk);
 }
 
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombined(
     JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("loadCombined invoked");
   bool optimize = true;
-  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
-                                         jstring2cppstring(env, paramPath),
-                                         optimize);
+  bool isLoadOk = false;
+#ifdef ENABLE_EXCEPTION
+  try {
+    isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                               jstring2cppstring(env, paramPath),
+                                               optimize);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+    isLoadOk = false;
+  }
+#else
+  isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                             jstring2cppstring(env, paramPath),
+                                             optimize);
+#endif
+  return static_cast<jboolean>(isLoadOk);
 }
 
 JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_PML_loadCombinedQualified(
     JNIEnv *env, jclass thiz, jstring modelPath, jstring paramPath) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("loadCombinedQualified invoked");
   bool optimize = true;
   bool qualified = true;
-  return getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
-                                         jstring2cppstring(env, paramPath),
-                                         optimize, qualified);
+  bool isLoadOk = false;
+#ifdef ENABLE_EXCEPTION
+  try {
+    isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                               jstring2cppstring(env, paramPath),
+                                               optimize, qualified);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+    isLoadOk = false;
+  }
+#else
+  isLoadOk = getPaddleMobileInstance()->Load(jstring2cppstring(env, modelPath),
+                                             jstring2cppstring(env, paramPath),
+                                             optimize, qualified);
+#endif
+  return static_cast<jboolean>(isLoadOk);
 }
 
 JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictImage(
     JNIEnv *env, jclass thiz, jfloatArray buf, jintArray ddims) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("predictImage invoked");
+  jfloatArray result = NULL;
+#ifdef ENABLE_EXCEPTION
+  ANDROIDLOGE("ENABLE_EXCEPTION!");
+  try {
     jsize ddim_size = env->GetArrayLength(ddims);
     if (ddim_size != 4) {
       ANDROIDLOGE("ddims size not equal to 4");
@@ -100,7 +167,6 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictImage(
     }
     jint *ddim_ptr = env->GetIntArrayElements(ddims, NULL);
     framework::DDim ddim = framework::make_ddim(
         {ddim_ptr[0], ddim_ptr[1], ddim_ptr[2], ddim_ptr[3]});
     int length = framework::product(ddim);
-    jfloatArray result = NULL;
     int count = 0;
     float *dataPointer = nullptr;
     if (nullptr != buf) {
@@ -112,12 +178,52 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictImage(
     for (int i = 0; i < length; i++) {
       input_ptr[i] = dataPointer[i];
     }
-    auto output = shared_paddle_mobile_instance->Predict(input);
+    auto output = getPaddleMobileInstance()->Predict(input);
     count = output->numel();
     result = env->NewFloatArray(count);
     env->SetFloatArrayRegion(result, 0, count, output->data<float>());
     env->ReleaseIntArrayElements(ddims, ddim_ptr, 0);
+    env->DeleteLocalRef(ddims);
+    env->ReleaseFloatArrayElements(buf, dataPointer, 0);
+    env->DeleteLocalRef(buf);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+  }
+#else
+  jsize ddim_size = env->GetArrayLength(ddims);
+  if (ddim_size != 4) {
+    ANDROIDLOGE("ddims size not equal to 4");
+  }
+  jint *ddim_ptr = env->GetIntArrayElements(ddims, NULL);
+  framework::DDim ddim = framework::make_ddim(
+      {ddim_ptr[0], ddim_ptr[1], ddim_ptr[2], ddim_ptr[3]});
+  int length = framework::product(ddim);
+  int count = 0;
+  float *dataPointer = nullptr;
+  if (nullptr != buf) {
+    dataPointer = env->GetFloatArrayElements(buf, NULL);
+  }
+  framework::Tensor input;
+  input.Resize(ddim);
+  auto input_ptr = input.mutable_data<float>();
+  for (int i = 0; i < length; i++) {
+    input_ptr[i] = dataPointer[i];
+  }
+  auto output = getPaddleMobileInstance()->Predict(input);
+  count = output->numel();
+  result = env->NewFloatArray(count);
+  env->SetFloatArrayRegion(result, 0, count, output->data<float>());
+  env->ReleaseIntArrayElements(ddims, ddim_ptr, 0);
+  env->DeleteLocalRef(ddims);
+  env->ReleaseFloatArrayElements(buf, dataPointer, 0);
+  env->DeleteLocalRef(buf);
+  env->DeleteLocalRef(dataPointer);
+#endif
   ANDROIDLOGI("predictImage finished");
   return result;
 }
@@ -170,7 +276,48 @@ convert_nv21_to_matrix(uint8_t *nv21, float *matrix, int width, int height,
 JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictYuv(
     JNIEnv *env, jclass thiz, jbyteArray yuv_, jint imgwidth, jint imgHeight,
     jintArray ddims, jfloatArray meanValues) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("predictYuv invoked");
+  jfloatArray result = NULL;
+#ifdef ENABLE_EXCEPTION
+  try {
+    jsize ddim_size = env->GetArrayLength(ddims);
+    if (ddim_size != 4) {
+      ANDROIDLOGE("ddims size not equal to 4");
+    }
+    jint *ddim_ptr = env->GetIntArrayElements(ddims, NULL);
+    framework::DDim ddim = framework::make_ddim(
+        {ddim_ptr[0], ddim_ptr[1], ddim_ptr[2], ddim_ptr[3]});
+    int length = framework::product(ddim);
+    float matrix[length];
+    jbyte *yuv = env->GetByteArrayElements(yuv_, NULL);
+    float *meansPointer = nullptr;
+    if (nullptr != meanValues) {
+      meansPointer = env->GetFloatArrayElements(meanValues, NULL);
+    }
+    convert_nv21_to_matrix((uint8_t *)yuv, matrix, imgwidth, imgHeight, ddim[3],
+                           ddim[2], meansPointer);
+    int count = 0;
+    framework::Tensor input;
+    input.Resize(ddim);
+    auto input_ptr = input.mutable_data<float>();
+    for (int i = 0; i < length; i++) {
+      input_ptr[i] = matrix[i];
+    }
+    auto output = getPaddleMobileInstance()->Predict(input);
+    count = output->numel();
+    result = env->NewFloatArray(count);
+    env->SetFloatArrayRegion(result, 0, count, output->data<float>());
+    env->ReleaseByteArrayElements(yuv_, yuv, 0);
+    env->ReleaseIntArrayElements(ddims, ddim_ptr, 0);
+    env->ReleaseFloatArrayElements(meanValues, meansPointer, 0);
+    ANDROIDLOGI("predictYuv finished");
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+  }
+#else
   jsize ddim_size = env->GetArrayLength(ddims);
   if (ddim_size != 4) {
     ANDROIDLOGE("ddims size not equal to 4");
@@ -187,7 +334,6 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictYuv(
   }
   convert_nv21_to_matrix((uint8_t *)yuv, matrix, imgwidth, imgHeight, ddim[3],
                          ddim[2], meansPointer);
-  jfloatArray result = NULL;
   int count = 0;
   framework::Tensor input;
   input.Resize(ddim);
@@ -195,7 +341,7 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictYuv(
   for (int i = 0; i < length; i++) {
     input_ptr[i] = matrix[i];
  }
-  auto output = shared_paddle_mobile_instance->Predict(input);
+  auto output = getPaddleMobileInstance()->Predict(input);
   count = output->numel();
   result = env->NewFloatArray(count);
   env->SetFloatArrayRegion(result, 0, count, output->data<float>());
@@ -203,19 +349,44 @@ JNIEXPORT jfloatArray JNICALL Java_com_baidu_paddle_PML_predictYuv(
   env->ReleaseIntArrayElements(ddims, ddim_ptr, 0);
   env->ReleaseFloatArrayElements(meanValues, meansPointer, 0);
   ANDROIDLOGI("predictYuv finished");
+#endif
   return result;
 }
 
 JNIEXPORT void JNICALL Java_com_baidu_paddle_PML_setThread(JNIEnv *env,
                                                            jclass thiz,
                                                            jint threadCount) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
   ANDROIDLOGI("setThreadCount %d", threadCount);
+#ifdef ENABLE_EXCEPTION
+  try {
     getPaddleMobileInstance()->SetThreadNum((int)threadCount);
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+  }
+#else
+  getPaddleMobileInstance()->SetThreadNum((int)threadCount);
+#endif
 }
 
 JNIEXPORT void JNICALL Java_com_baidu_paddle_PML_clear(JNIEnv *env,
                                                        jclass thiz) {
+  std::lock_guard<std::mutex> lock(shared_mutex);
+#ifdef ENABLE_EXCEPTION
+  try {
+    getPaddleMobileInstance()->Clear();
+  } catch (paddle_mobile::PaddleMobileException &e) {
+    ANDROIDLOGE("jni got an PaddleMobileException! ", e.what());
+  }
+#else
   getPaddleMobileInstance()->Clear();
+#endif
 }
 }  // namespace jni
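
As a companion to the JNI changes above, a hedged Java-side sketch of the NV21/YUV path follows; the input shape and mean values are assumptions for illustration, not values from this commit.

    // Hypothetical caller-side sketch for the NV21/YUV path; shape and means are assumptions.
    import com.baidu.paddle.PML;

    public class YuvInferenceExample {
        // Called with an NV21 preview frame from the camera.
        public static float[] predictFrame(byte[] nv21, int previewWidth, int previewHeight) {
            int[] ddims = {1, 3, 224, 224};               // NCHW shape the model expects (example)
            float[] means = {103.94f, 116.78f, 123.68f};  // per-channel mean values (example)
            // The native side converts NV21 to a float matrix, applies the mean values,
            // and feeds the resulting tensor to Predict.
            return PML.predictYuv(nv21, previewWidth, previewHeight, ddims, means);
        }
    }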
src/jni/paddle_mobile_jni.h

@@ -73,7 +73,7 @@ JNIEXPORT void JNICALL Java_com_baidu_paddle_PML_setThread(JNIEnv *env,
 /**
  * clear data of the net when destroy for android
  */
-JNIEXPORT void JNICALL Java_com_baidu_paddle_PMLL_clear(JNIEnv *env,
-                                                         jclass thiz);
+JNIEXPORT void JNICALL Java_com_baidu_paddle_PML_clear(JNIEnv *env,
+                                                       jclass thiz);
 }  // namespace jni
 }  // namespace paddle_mobile
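
The header change above corrects the exported symbol name so that it matches the Java binding PML.clear(). A hedged lifecycle sketch, with the call sites and thread count as assumptions:

    // Hypothetical lifecycle sketch; the thread count and where these calls live are assumptions.
    import com.baidu.paddle.PML;

    public class LifecycleExample {
        public static void onCreate() {
            if (!PML.load("/sdcard/pml/separated_model_dir")) {  // example path
                throw new IllegalStateException("load failed");
            }
            PML.setThread(2);  // only takes effect when the library is built with OpenMP
        }

        public static void onDestroy() {
            PML.clear();  // releases model data; maps to Java_com_baidu_paddle_PML_clear
        }
    }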
src/operators/feed_op.h

@@ -49,6 +49,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
     auto input_ptr = input->data<float>();
     Tensor *output = param_.Out();
     auto output_ptr = output->mutable_data<half>();
+    auto out_address = output->fpga_args().scale_pointer();
     fpga::BypassArgs args;
     args.convert_type = fpga::DATA_FP32_TO_FP16;
     args.layout_type = fpga::LAYOUT_CHW_TO_HWC;
@@ -59,6 +60,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
     args.image.pad_height = 0;
     args.image.pad_width = 0;
     args.output.address = output_ptr;
+    args.output.scale_address = out_address;
     fpga::PerformBypass(args);
   }