Commit 238f3c8e (unverified) — authored by chenyanlann on Feb 10, 2022; committed via GitHub on Feb 10, 2022.
【PaddlePaddle Hackathon】31. Add Java frontend for Paddle Inference (#37162)
Parent commit: e2ad433b
Showing 15 changed files, with 1,863 additions and 0 deletions (+1863 −0).
Changed files:
- paddle/fluid/inference/experimental/javaapi/CMakeLists.txt (+5 −0)
- paddle/fluid/inference/experimental/javaapi/build.sh (+23 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp (+257 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h (+285 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.cpp (+105 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h (+127 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.cpp (+128 −0)
- paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h (+159 −0)
- paddle/fluid/inference/experimental/javaapi/native/jni_convert_util.h (+123 −0)
- paddle/fluid/inference/experimental/javaapi/readme.md (+93 −0)
- paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java (+270 −0)
- paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Predictor.java (+97 −0)
- paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Tensor.java (+101 −0)
- paddle/fluid/inference/experimental/javaapi/test.java (+67 −0)
- paddle/fluid/inference/experimental/javaapi/test.sh (+23 −0)
paddle/fluid/inference/experimental/javaapi/CMakeLists.txt (new file, mode 100644)
include_directories($ENV{jni_path} $ENV{jni_sub_path} $ENV{paddle_path})
find_library(PADDLE_INFERENCE_C libpaddle_inference_c.so HINTS $ENV{paddle_inference_lib})
aux_source_directory(native JNI_SRCS)
add_library(paddle_inference SHARED ${JNI_SRCS})
target_link_libraries(paddle_inference ${PADDLE_INFERENCE_C})
paddle/fluid/inference/experimental/javaapi/build.sh (new file, mode 100755)
#!/bin/bash
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
mkdir build && cd build

export library_path=$1
export jni_path=$2
export jni_sub_path=$3

mkldnn_lib=$library_path"/third_party/install/mkldnn/lib"
mklml_lib=$library_path"/third_party/install/mklml/lib"
export paddle_inference_lib=$library_path"/paddle/lib"
export paddle_path=$library_path"/paddle/include"
export LD_LIBRARY_PATH=$mkldnn_lib:$mklml_lib:$paddle_inference_lib

cmake .. && make
#g++ -fPIC -D_REENTRANT -I $jni_path -I $jni_sub_path -I $paddle_path -L $paddle_inference_lib -c com_baidu_paddle_inference_Predictor.cpp com_baidu_paddle_inference_Config.cpp com_baidu_paddle_inference_Tensor.cpp
#g++ -shared -I $paddle_path -L $paddle_inference_lib com_baidu_paddle_inference_Config.o com_baidu_paddle_inference_Predictor.o com_baidu_paddle_inference_Tensor.o -o libpaddle_inference.so -lpaddle_inference_c

cd ../src/main/java/com/baidu/paddle/inference
javac Config.java Predictor.java Tensor.java
cd ../../../../../../../
cp ./native/libpaddle_inference.so libpaddle_inference.so
pwd
jar cvf JavaInference.jar -C src/main/java/ .
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.cpp (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "com_baidu_paddle_inference_Config.h"
#include <iostream>
#include "jni_convert_util.h" // NOLINT
#include "pd_inference_api.h" // NOLINT
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_cppConfigDestroy(
    JNIEnv*, jobject, jlong cppPaddleConfigPointer) {
  PD_ConfigDestroy(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

// 1. create Config
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Config_createCppConfig(
    JNIEnv* env, jobject obj) {
  jlong cppPaddleConfigPointer = reinterpret_cast<jlong>(PD_ConfigCreate());
  return cppPaddleConfigPointer;
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_isCppConfigValid(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigIsValid(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

// 2. not combined model settings
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppModel(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jstring modelFile,
    jstring paramsFile) {
  PD_ConfigSetModel(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                    jstring_to_cpp_string(env, modelFile).c_str(),
                    jstring_to_cpp_string(env, paramsFile).c_str());
}

// 3. combined model settings
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppModelDir(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jstring modelDir) {
  PD_ConfigSetModelDir(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                       jstring_to_cpp_string(env, modelDir).c_str());
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppProgFile(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jstring progFile) {
  PD_ConfigSetProgFile(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                       jstring_to_cpp_string(env, progFile).c_str());
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppParamsFile(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jstring paramsFile) {
  PD_ConfigSetParamsFile(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                         jstring_to_cpp_string(env, paramsFile).c_str());
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_modelDir(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  return cpp_string_to_jstring(
      env, PD_ConfigGetModelDir(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer)));
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_progFile(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  return cpp_string_to_jstring(
      env, PD_ConfigGetProgFile(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer)));
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_paramsFile(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  return cpp_string_to_jstring(
      env, PD_ConfigGetParamsFile(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer)));
}

// 4. cpu settings
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jint mathThreadsNum) {
  int math_threads_num = reinterpret_cast<int>(mathThreadsNum);
  PD_ConfigSetCpuMathLibraryNumThreads(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer), math_threads_num);
}

JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  jint mathThreadsNum = reinterpret_cast<jint>(PD_ConfigGetCpuMathLibraryNumThreads(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer)));
  return mathThreadsNum;
}

// 5. MKLDNN settings
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMKLDNN(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  PD_ConfigEnableMKLDNN(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_mkldnnEnabled(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigMkldnnEnabled(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  PD_ConfigEnableMkldnnBfloat16(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigMkldnnBfloat16Enabled(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

// 6. gpu setting
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableUseGpu(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jlong memorySize,
    jint deviceId) {
  PD_ConfigEnableUseGpu(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                        (uint64_t)memorySize, (int32_t)deviceId);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_disableGpu(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  PD_ConfigDisableGpu(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_useGpu(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigUseGpu(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_gpuDeviceId(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  int device_id = PD_ConfigGpuDeviceId(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return reinterpret_cast<jint>(device_id);
}

JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  int memory_pool_init_size_mb = PD_ConfigMemoryPoolInitSizeMb(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return reinterpret_cast<jint>(memory_pool_init_size_mb);
}

JNIEXPORT jfloat JNICALL Java_com_baidu_paddle_inference_Config_fractionOfGpuMemoryForPool(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  float fraction_of_gpuMemory_for_pool = PD_ConfigFractionOfGpuMemoryForPool(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return (jfloat)fraction_of_gpuMemory_for_pool;
}

// 7. TensorRT To Do

// 8. optim setting
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_switchIrOptim(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jboolean flag) {
  PD_ConfigSwitchIrOptim(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                         jboolean_to_cpp_bool(env, flag));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_irOptim(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigIrOptim(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_switchIrDebug(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jboolean flag) {
  PD_ConfigSwitchIrDebug(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                         jboolean_to_cpp_bool(env, flag));
}

// 9. enable memory optimization
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer, jboolean flag) {
  PD_ConfigEnableMemoryOptim(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer),
                             jboolean_to_cpp_bool(env, flag));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigMemoryOptimEnabled(
      reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

// 10. profile setting
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableProfile(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  PD_ConfigEnableProfile(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_profileEnabled(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  bool flag = PD_ConfigProfileEnabled(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
  return cpp_bool_to_jboolean(env, flag);
}

// 11. log setting
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_disableGlogInfo(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  PD_ConfigDisableGlogInfo(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer));
}

// 12. view config configuration
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_summary(
    JNIEnv* env, jobject obj, jlong cppPaddleConfigPointer) {
  return cpp_string_to_jstring(
      env, PD_ConfigSummary(reinterpret_cast<PD_Config*>(cppPaddleConfigPointer)));
}
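Each exported function above is resolved by the JVM through the standard JNI naming rule: `Java_` + the fully qualified class name with dots replaced by underscores + `_` + the method name. The Java side in this commit therefore only needs `private native` declarations plus a `System.loadLibrary` call (done in test.java); a trimmed sketch of that pairing, not the full Config.java that appears later in this diff, is:

```java
package com.baidu.paddle.inference;

public class Config {
    static {
        // Loads libpaddle_inference.so (built by build.sh) from java.library.path.
        System.loadLibrary("paddle_inference");
    }

    // Resolves to Java_com_baidu_paddle_inference_Config_createCppConfig above;
    // the returned long carries the native PD_Config* that the Java object stores.
    private native long createCppConfig();

    // Resolves to Java_com_baidu_paddle_inference_Config_summary above.
    private native String summary(long cppPaddleConfigPointer);
}
```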
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Config.h (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_baidu_paddle_inference_Config */
#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
#ifdef __cplusplus
extern "C" {
#endif

/* Method: cppConfigDestroy  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_cppConfigDestroy(JNIEnv *, jobject, jlong);
/* Method: createCppConfig  Signature: ()J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Config_createCppConfig(JNIEnv *, jobject);
/* Method: isCppConfigValid  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_isCppConfigValid(JNIEnv *, jobject, jlong);
/* Method: setCppModel  Signature: (JLjava/lang/String;Ljava/lang/String;)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppModel(JNIEnv *, jobject, jlong, jstring, jstring);
/* Method: setCppModelDir  Signature: (JLjava/lang/String;)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppModelDir(JNIEnv *, jobject, jlong, jstring);
/* Method: setCppProgFile  Signature: (JLjava/lang/String;)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppProgFile(JNIEnv *, jobject, jlong, jstring);
/* Method: setCppParamsFile  Signature: (JLjava/lang/String;)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCppParamsFile(JNIEnv *, jobject, jlong, jstring);
/* Method: modelDir  Signature: (J)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_modelDir(JNIEnv *, jobject, jlong);
/* Method: progFile  Signature: (J)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_progFile(JNIEnv *, jobject, jlong);
/* Method: paramsFile  Signature: (J)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_paramsFile(JNIEnv *, jobject, jlong);
/* Method: setCpuMathLibraryNumThreads  Signature: (JI)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_setCpuMathLibraryNumThreads(JNIEnv *, jobject, jlong, jint);
/* Method: cpuMathLibraryNumThreads  Signature: (J)I */
JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_cpuMathLibraryNumThreads(JNIEnv *, jobject, jlong);
/* Method: enableMKLDNN  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMKLDNN(JNIEnv *, jobject, jlong);
/* Method: mkldnnEnabled  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_mkldnnEnabled(JNIEnv *, jobject, jlong);
/* Method: enableMkldnnBfloat16  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMkldnnBfloat16(JNIEnv *, jobject, jlong);
/* Method: mkldnnBfloat16Enabled  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_mkldnnBfloat16Enabled(JNIEnv *, jobject, jlong);
/* Method: enableUseGpu  Signature: (JJI)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableUseGpu(JNIEnv *, jobject, jlong, jlong, jint);
/* Method: disableGpu  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_disableGpu(JNIEnv *, jobject, jlong);
/* Method: useGpu  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_useGpu(JNIEnv *, jobject, jlong);
/* Method: gpuDeviceId  Signature: (J)I */
JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_gpuDeviceId(JNIEnv *, jobject, jlong);
/* Method: memoryPoolInitSizeMb  Signature: (J)I */
JNIEXPORT jint JNICALL Java_com_baidu_paddle_inference_Config_memoryPoolInitSizeMb(JNIEnv *, jobject, jlong);
/* Method: fractionOfGpuMemoryForPool  Signature: (J)F */
JNIEXPORT jfloat JNICALL Java_com_baidu_paddle_inference_Config_fractionOfGpuMemoryForPool(JNIEnv *, jobject, jlong);
/* Method: switchIrOptim  Signature: (JZ)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_switchIrOptim(JNIEnv *, jobject, jlong, jboolean);
/* Method: irOptim  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_irOptim(JNIEnv *, jobject, jlong);
/* Method: switchIrDebug  Signature: (JZ)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_switchIrDebug(JNIEnv *, jobject, jlong, jboolean);
/* Method: enableMemoryOptim  Signature: (JZ)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableMemoryOptim(JNIEnv *, jobject, jlong, jboolean);
/* Method: memoryOptimEnabled  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_memoryOptimEnabled(JNIEnv *, jobject, jlong);
/* Method: enableProfile  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_enableProfile(JNIEnv *, jobject, jlong);
/* Method: profileEnabled  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Config_profileEnabled(JNIEnv *, jobject, jlong);
/* Method: disableGlogInfo  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Config_disableGlogInfo(JNIEnv *, jobject, jlong);
/* Method: summary  Signature: (J)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Config_summary(JNIEnv *, jobject, jlong);

#ifdef __cplusplus
}
#endif
#endif // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_CONFIG_H_
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.cpp (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "com_baidu_paddle_inference_Predictor.h"
#include <jni.h>
#include "jni_convert_util.h" // NOLINT
#include "pd_inference_api.h" // NOLINT
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(
    JNIEnv*, jobject, jlong cppPaddlePredictorPointer) {
  PD_PredictorDestroy(reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_predictorTryShrinkMemory(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  PD_PredictorTryShrinkMemory(reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  PD_PredictorClearIntermediateTensor(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}

JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_createPredictor(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  return (jlong)PD_PredictorCreate(reinterpret_cast<PD_Config*>(cppPaddlePredictorPointer));
}

JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getInputNum(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  return (jlong)PD_PredictorGetInputNum(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}

JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  return (jlong)PD_PredictorGetOutputNum(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer, jlong index) {
  const char* c_str = PD_PredictorGetInputNames(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer))
                          ->data[static_cast<int>(index)];
  return env->NewStringUTF(c_str);
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer, jlong index) {
  const char* c_str = PD_PredictorGetOutputNames(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer))
                          ->data[static_cast<int>(index)];
  return env->NewStringUTF(c_str);
}

JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer, jstring name) {
  // const char* input_name = env->GetStringUTFChars(name, 0);
  PD_Predictor* pd_predictor =
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer);
  jlong output_tensor = (jlong)PD_PredictorGetInputHandle(
      pd_predictor, jstring_to_cpp_string(env, name).c_str());
  return output_tensor;
}

JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer, jstring name) {
  // const char* output_name = env->GetStringUTFChars(name, 0);
  PD_Predictor* pd_predictor =
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer);
  jlong output_tensor = (jlong)PD_PredictorGetOutputHandle(
      pd_predictor, jstring_to_cpp_string(env, name).c_str());
  return output_tensor;
}

JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Predictor_runPD(
    JNIEnv* env, jobject obj, jlong cppPaddlePredictorPointer) {
  return (jboolean)PD_PredictorRun(
      reinterpret_cast<PD_Predictor*>(cppPaddlePredictorPointer));
}
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Predictor.h (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_baidu_paddle_inference_Predictor */
#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
#ifdef __cplusplus
extern "C" {
#endif

/* Method: cppPredictorDestroy  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_cppPredictorDestroy(JNIEnv *, jobject, jlong);
/* Method: predictorTryShrinkMemory  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_predictorTryShrinkMemory(JNIEnv *, jobject, jlong);
/* Method: predictorClearIntermediateTensor  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Predictor_predictorClearIntermediateTensor(JNIEnv *, jobject, jlong);
/* Method: createPredictor  Signature: (J)J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_createPredictor(JNIEnv *, jobject, jlong);
/* Method: getInputNum  Signature: (J)J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getInputNum(JNIEnv *, jobject, jlong);
/* Method: getOutputNum  Signature: (J)J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNum(JNIEnv *, jobject, jlong);
/* Method: getInputNameByIndex  Signature: (JJ)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Predictor_getInputNameByIndex(JNIEnv *, jobject, jlong, jlong);
/* Method: getOutputNameByIndex  Signature: (JJ)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputNameByIndex(JNIEnv *, jobject, jlong, jlong);
/* Method: getInputHandleByName  Signature: (JLjava/lang/String;)J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getInputHandleByName(JNIEnv *, jobject, jlong, jstring);
/* Method: getOutputHandleByName  Signature: (JLjava/lang/String;)J */
JNIEXPORT jlong JNICALL Java_com_baidu_paddle_inference_Predictor_getOutputHandleByName(JNIEnv *, jobject, jlong, jstring);
/* Method: runPD  Signature: (J)Z */
JNIEXPORT jboolean JNICALL Java_com_baidu_paddle_inference_Predictor_runPD(JNIEnv *, jobject, jlong);

#ifdef __cplusplus
}
#endif
#endif // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_PREDICTOR_H_
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.cpp (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "com_baidu_paddle_inference_Tensor.h"
#include <jni.h>
#include "pd_inference_api.h" // NOLINT
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorDestroy(
    JNIEnv*, jobject, jlong tensorPointer) {
  PD_TensorDestroy(reinterpret_cast<PD_Tensor*>(tensorPointer));
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(
    JNIEnv* env, jobject, jlong tensorPointer, jint dim, jintArray array) {
  int32_t* input_shape = env->GetIntArrayElements(array, nullptr);
  PD_TensorReshape(reinterpret_cast<PD_Tensor*>(tensorPointer),
                   static_cast<int>(dim), input_shape);
  env->ReleaseIntArrayElements(array, input_shape, JNI_ABORT);
}

JNIEXPORT jintArray JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(
    JNIEnv* env, jobject, jlong tensorPointer) {
  PD_Tensor* tensor = reinterpret_cast<PD_Tensor*>(tensorPointer);
  PD_OneDimArrayInt32* output_shape = PD_TensorGetShape(tensor);
  jintArray result = env->NewIntArray(output_shape->size);
  env->SetIntArrayRegion(result, 0, output_shape->size, output_shape->data);
  return result;
}

JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(
    JNIEnv* env, jobject, jlong tensorPointer) {
  const char* c_str = PD_TensorGetName(reinterpret_cast<PD_Tensor*>(tensorPointer));
  return env->NewStringUTF(c_str);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(
    JNIEnv* env, jobject, jlong tensorPointer, jfloatArray array) {
  float* data = env->GetFloatArrayElements(array, nullptr);
  PD_TensorCopyFromCpuFloat(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseFloatArrayElements(array, data, JNI_ABORT);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(
    JNIEnv* env, jobject, jlong tensorPointer, jintArray array) {
  int32_t* data = env->GetIntArrayElements(array, nullptr);
  PD_TensorCopyFromCpuInt32(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseIntArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(
    JNIEnv* env, jobject, jlong tensorPointer, jlongArray array) {
  int64_t* data = env->GetLongArrayElements(array, nullptr);
  PD_TensorCopyFromCpuInt64(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseLongArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(
    JNIEnv* env, jobject, jlong tensorPointer, jbyteArray array) {
  int8_t* data = env->GetByteArrayElements(array, nullptr);
  PD_TensorCopyFromCpuInt8(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseByteArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(
    JNIEnv* env, jobject, jlong tensorPointer, jbooleanArray array) {
  uint8_t* data = env->GetBooleanArrayElements(array, nullptr);
  PD_TensorCopyFromCpuUint8(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseBooleanArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(
    JNIEnv* env, jobject, jlong tensorPointer, jfloatArray array) {
  float* data = env->GetFloatArrayElements(array, nullptr);
  PD_TensorCopyToCpuFloat(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseFloatArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(
    JNIEnv* env, jobject, jlong tensorPointer, jintArray array) {
  int32_t* data = env->GetIntArrayElements(array, nullptr);
  PD_TensorCopyToCpuInt32(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseIntArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(
    JNIEnv* env, jobject, jlong tensorPointer, jlongArray array) {
  int64_t* data = env->GetLongArrayElements(array, nullptr);
  PD_TensorCopyToCpuInt64(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseLongArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(
    JNIEnv* env, jobject, jlong tensorPointer, jbyteArray array) {
  int8_t* data = env->GetByteArrayElements(array, nullptr);
  PD_TensorCopyToCpuInt8(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseByteArrayElements(array, data, 0);
}

JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(
    JNIEnv* env, jobject, jlong tensorPointer, jbooleanArray array) {
  uint8_t* data = env->GetBooleanArrayElements(array, nullptr);
  PD_TensorCopyToCpuUint8(reinterpret_cast<PD_Tensor*>(tensorPointer), data);
  env->ReleaseBooleanArrayElements(array, data, 0);
}
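Note that the `PD_TensorCopyToCpu*` calls above write into a buffer supplied by the caller, so on the Java side the output array must be allocated to the tensor's full element count before `copyToCpu` is invoked. A minimal sketch of that contract, mirroring what test.java does later in this diff (`outHandle` is assumed to be a `com.baidu.paddle.inference.Tensor` obtained from the predictor):

```java
int[] shape = outHandle.getShape();   // backed by cppTensorGetShape above
int numElements = 1;
for (int d : shape) {
    numElements *= d;                 // same product that Tensor.getSize() computes
}
float[] outData = new float[numElements];
outHandle.copyToCpu(outData);         // backed by cppTensorCopyToCpuFloat above
```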
paddle/fluid/inference/experimental/javaapi/native/com_baidu_paddle_inference_Tensor.h (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* DO NOT EDIT THIS FILE - it is machine generated */
#include <jni.h>
/* Header for class com_baidu_paddle_inference_Tensor */
#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
#ifdef __cplusplus
extern "C" {
#endif

/* Method: cppTensorDestroy  Signature: (J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorDestroy(JNIEnv *, jobject, jlong);
/* Method: cppTensorReshape  Signature: (JI[I)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorReshape(JNIEnv *, jobject, jlong, jint, jintArray);
/* Method: cppTensorGetShape  Signature: (J)[I */
JNIEXPORT jintArray JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorGetShape(JNIEnv *, jobject, jlong);
/* Method: cppTensorGetName  Signature: (J)Ljava/lang/String; */
JNIEXPORT jstring JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorGetName(JNIEnv *, jobject, jlong);
/* Method: cppTensorCopyFromCpuFloat  Signature: (J[F)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuFloat(JNIEnv *, jobject, jlong, jfloatArray);
/* Method: cppTensorCopyFromCpuInt  Signature: (J[I)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuInt(JNIEnv *, jobject, jlong, jintArray);
/* Method: cppTensorCopyFromCpuLong  Signature: (J[J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuLong(JNIEnv *, jobject, jlong, jlongArray);
/* Method: cppTensorCopyFromCpuByte  Signature: (J[B)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuByte(JNIEnv *, jobject, jlong, jbyteArray);
/* Method: cppTensorCopyFromCpuBoolean  Signature: (J[Z)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyFromCpuBoolean(JNIEnv *, jobject, jlong, jbooleanArray);
/* Method: cppTensorCopyToCpuFloat  Signature: (J[F)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuFloat(JNIEnv *, jobject, jlong, jfloatArray);
/* Method: cppTensorCopyToCpuInt  Signature: (J[I)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuInt(JNIEnv *, jobject, jlong, jintArray);
/* Method: cppTensorCopyToCpuLong  Signature: (J[J)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuLong(JNIEnv *, jobject, jlong, jlongArray);
/* Method: cppTensorCopyToCpuByte  Signature: (J[B)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuByte(JNIEnv *, jobject, jlong, jbyteArray);
/* Method: cppTensorCopyToCpuBoolean  Signature: (J[Z)V */
JNIEXPORT void JNICALL Java_com_baidu_paddle_inference_Tensor_cppTensorCopyToCpuBoolean(JNIEnv *, jobject, jlong, jbooleanArray);

#ifdef __cplusplus
}
#endif
#endif // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_COM_BAIDU_PADDLE_INFERENCE_TENSOR_H_
paddle/fluid/inference/experimental/javaapi/native/jni_convert_util.h (new file, mode 100644)
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_JNI_CONVERT_UTIL_H_ // NOLINT
#define PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_JNI_CONVERT_UTIL_H_
#include <jni.h>
#include <string.h>
#include <string>
#include <vector>
#define PADDLE_WITH_CUDA PADDLE_WITH_CUDA
inline std::string jstring_to_cpp_string(JNIEnv* env, jstring jstr) {
  if (!jstr) {
    return "";
  }
  const jclass stringClass = env->GetObjectClass(jstr);
  const jmethodID getBytes =
      env->GetMethodID(stringClass, "getBytes", "(Ljava/lang/String;)[B");
  const jbyteArray stringJbytes = (jbyteArray)env->CallObjectMethod(
      jstr, getBytes, env->NewStringUTF("UTF-8"));
  size_t length = static_cast<size_t>(env->GetArrayLength(stringJbytes));
  jbyte* pBytes = env->GetByteArrayElements(stringJbytes, NULL);
  std::string ret = std::string(reinterpret_cast<char*>(pBytes), length);
  env->ReleaseByteArrayElements(stringJbytes, pBytes, JNI_ABORT);
  env->DeleteLocalRef(stringJbytes);
  env->DeleteLocalRef(stringClass);
  return ret;
}

inline jstring cpp_string_to_jstring(JNIEnv* env, std::string str) {
  auto* data = str.c_str();
  jclass strClass = env->FindClass("java/lang/String");
  jmethodID strClassInitMethodID =
      env->GetMethodID(strClass, "<init>", "([BLjava/lang/String;)V");
  jbyteArray bytes = env->NewByteArray(strlen(data));
  env->SetByteArrayRegion(bytes, 0, strlen(data),
                          reinterpret_cast<const jbyte*>(data));
  jstring encoding = env->NewStringUTF("UTF-8");
  jstring res = (jstring)(env->NewObject(strClass, strClassInitMethodID, bytes, encoding));
  env->DeleteLocalRef(strClass);
  env->DeleteLocalRef(encoding);
  env->DeleteLocalRef(bytes);
  return res;
}

inline jboolean cpp_bool_to_jboolean(JNIEnv* env, bool flag) {
  return flag ? JNI_TRUE : JNI_FALSE;
}

inline bool jboolean_to_cpp_bool(JNIEnv* env, jboolean flag) {
  return flag == JNI_TRUE;
}

inline jfloatArray cpp_array_to_jfloatarray(JNIEnv* env, const float* buf, int64_t len) {
  jfloatArray result = env->NewFloatArray(len);
  env->SetFloatArrayRegion(result, 0, len, buf);
  return result;
}

inline jbyteArray cpp_array_to_jbytearray(JNIEnv* env, const int8_t* buf, int64_t len) {
  jbyteArray result = env->NewByteArray(len);
  env->SetByteArrayRegion(result, 0, len, buf);
  return result;
}

inline jintArray cpp_array_to_jintarray(JNIEnv* env, const int* buf, int64_t len) {
  jintArray result = env->NewIntArray(len);
  env->SetIntArrayRegion(result, 0, len, buf);
  return result;
}

inline jlongArray cpp_array_to_jlongarray(JNIEnv* env, const int64_t* buf, int64_t len) {
  jlongArray result = env->NewLongArray(len);
  env->SetLongArrayRegion(result, 0, len, buf);
  return result;
}

inline jlongArray int64_vector_to_jlongarray(JNIEnv* env, const std::vector<int64_t>& vec) {
  jlongArray result = env->NewLongArray(vec.size());
  jlong* buf = new jlong[vec.size()];
  for (size_t i = 0; i < vec.size(); ++i) {
    buf[i] = (jlong)vec[i];
  }
  env->SetLongArrayRegion(result, 0, vec.size(), buf);
  delete[] buf;
  return result;
}

inline std::vector<int64_t> jlongarray_to_int64_vector(JNIEnv* env, jlongArray dims) {
  int dim_size = env->GetArrayLength(dims);
  jlong* dim_nums = env->GetLongArrayElements(dims, nullptr);
  std::vector<int64_t> dim_vec(dim_nums, dim_nums + dim_size);
  env->ReleaseLongArrayElements(dims, dim_nums, 0);
  return dim_vec;
}
#endif // PADDLE_FLUID_INFERENCE_JAVAAPI_NATIVE_JNI_CONVERT_UTIL_H_
paddle/fluid/inference/experimental/javaapi/readme.md (new file, mode 100644)
# Paddle Inference Java API

The Paddle Inference Java API is implemented on top of the [capi](../capi_exp) and JNI. You need to prepare the Paddle Inference C library in advance.

## Installation (Linux)

##### 1. Download the C inference library

You can either download the prebuilt [paddle_inference_c](https://github.com/PaddlePaddle/Paddle-Inference-Demo/blob/master/docs/user_guides/download_lib.md) library directly, or build it from source following the official documentation. When building from source, pass `-DON_INFER=ON` to cmake; the build directory will then contain `paddle_inference_c_install_dir`.

##### 2. Prepare the deployment model

Download the [resnet50](https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/resnet50.tgz) model and extract it to get a model in Paddle Combined format.

```
wget https://paddle-inference-dist.bj.bcebos.com/Paddle-Inference-Demo/resnet50.tgz
tar zxf resnet50.tgz
# The resulting resnet50 directory looks like this
resnet50/
├── inference.pdmodel
├── inference.pdiparams
└── inference.pdiparams.info
```

##### 3. Prepare the working directory

```
git clone github.com/paddlepaddle/paddle/paddle/fluid/inference/javaapi
```

##### 4. Build the shared library and the jar

```bash
# Run inside the javaapi directory:
./build.sh {C inference library dir} {JNI header dir} {JNI platform header dir}

# For example, with the author's directory layout:
./build.sh /root/paddle_c/paddle_inference_c_2.2/paddle_inference_c /usr/lib/jvm/java-8-openjdk-amd64/include /usr/lib/jvm/java-8-openjdk-amd64/include/linux

# When it finishes, JavaInference.jar and libpaddle_inference.so are generated in the current directory.
```

##### 5. Run the unit test to verify the build

```
# Run inside the javaapi directory:
./test.sh {C inference library dir} {.pdmodel file path} {.pdiparams file path}

# For example, with the author's directory layout:
./test.sh "/root/paddle_c/paddle_inference_c_2.2/paddle_inference_c" "/root/paddle_c/resnet50/inference.pdmodel" "/root/paddle_c/resnet50/inference.pdiparams"
```

## Using Paddle Inference from Java

First create the inference config:

```java
Config config = new Config();
config.setCppModel(model_file, params_file);
```

Create the predictor:

```java
Predictor predictor = Predictor.createPaddlePredictor(config);
```

Get the input Tensor:

```java
String inNames = predictor.getInputNameById(0);
Tensor inHandle = predictor.getInputHandle(inNames);
```

Set the input data (assuming a single input):

```java
inHandle.reshape(4, new int[]{1, 3, 224, 224});
float[] inData = new float[1 * 3 * 224 * 224];
inHandle.copyFromCpu(inData);
```

Run inference:

```java
predictor.run();
```

Get the output Tensor:

```java
String outNames = predictor.getOutputNameById(0);
Tensor outHandle = predictor.getOutputHandle(outNames);
float[] outData = new float[outHandle.getSize()];
outHandle.copyToCpu(outData);
```
paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Config.java (new file, mode 100644)
package com.baidu.paddle.inference;

public class Config {

    private long cppPaddleConfigPointer;
    private String modelFile;
    private String paramsFile;
    private String modelDir;
    private String progFile;
    private int mathThreadsNum;

    public Config() {
        this.cppPaddleConfigPointer = createCppConfig();
    }

    @Override
    protected void finalize() throws Throwable {
        destroyNativeConfig();
    }

    public void destroyNativeConfig() {
        if (cppPaddleConfigPointer != 0) cppConfigDestroy(cppPaddleConfigPointer);
        cppPaddleConfigPointer = 0;
    }

    public boolean isValid() {
        if (cppPaddleConfigPointer == 0) return false;
        return isCppConfigValid(cppPaddleConfigPointer);
    }

    public void setCppModel(String modelFile, String paramsFile) {
        this.modelFile = modelFile;
        this.paramsFile = paramsFile;
        setCppModel(this.cppPaddleConfigPointer, modelFile, paramsFile);
    }

    public void setCppModelDir(String modelDir) {
        this.modelDir = modelDir;
        setCppModelDir(this.cppPaddleConfigPointer, modelDir);
    }

    public void setCppProgFile(String progFile) {
        this.progFile = progFile;
        setCppProgFile(this.cppPaddleConfigPointer, progFile);
    }

    public void setCppParamsFile(String paramsFile) {
        this.paramsFile = paramsFile;
        setCppParamsFile(this.cppPaddleConfigPointer, paramsFile);
    }

    public String getCppModelDir() {
        return modelDir(this.cppPaddleConfigPointer);
    }

    public String getCppProgFile() {
        return progFile(this.cppPaddleConfigPointer);
    }

    public String getCppParamsFile() {
        return paramsFile(this.cppPaddleConfigPointer);
    }

    public void setCpuMathLibraryNumThreads(int mathThreadsNum) {
        this.mathThreadsNum = mathThreadsNum;
        setCpuMathLibraryNumThreads(this.cppPaddleConfigPointer, mathThreadsNum);
    }

    public int getCpuMathLibraryNumThreads() {
        return cpuMathLibraryNumThreads(this.cppPaddleConfigPointer);
    }

    public void enableMKLDNN() {
        enableMKLDNN(this.cppPaddleConfigPointer);
    }

    public boolean mkldnnEnabled() {
        return mkldnnEnabled(this.cppPaddleConfigPointer);
    }

    public void enableMkldnnBfloat16() {
        enableMkldnnBfloat16(this.cppPaddleConfigPointer);
    }

    public boolean mkldnnBfloat16Enabled() {
        return mkldnnBfloat16Enabled(this.cppPaddleConfigPointer);
    }

    public void enableUseGpu(long memorySize, int deviceId) {
        enableUseGpu(this.cppPaddleConfigPointer, memorySize, deviceId);
    }

    public void disableGpu() {
        disableGpu(this.cppPaddleConfigPointer);
    }

    public boolean useGpu() {
        return useGpu(this.cppPaddleConfigPointer);
    }

    public int getGpuDeviceId() {
        return gpuDeviceId(this.cppPaddleConfigPointer);
    }

    public int getMemoryPoolInitSizeMb() {
        return memoryPoolInitSizeMb(this.cppPaddleConfigPointer);
    }

    public float getFractionOfGpuMemoryForPool() {
        return fractionOfGpuMemoryForPool(this.cppPaddleConfigPointer);
    }

    public void switchIrOptim(boolean flag) {
        switchIrOptim(this.cppPaddleConfigPointer, flag);
    }

    public boolean irOptim() {
        return irOptim(this.cppPaddleConfigPointer);
    }

    public void switchIrDebug(boolean flag) {
        switchIrDebug(this.cppPaddleConfigPointer, flag);
    }

    public void enableMemoryOptim(boolean flag) {
        enableMemoryOptim(this.cppPaddleConfigPointer, flag);
    }

    public boolean memoryOptimEnabled() {
        return memoryOptimEnabled(this.cppPaddleConfigPointer);
    }

    public void enableProfile() {
        enableProfile(this.cppPaddleConfigPointer);
    }

    public boolean profileEnabled() {
        return profileEnabled(this.cppPaddleConfigPointer);
    }

    public void disableGlogInfo() {
        disableGlogInfo(this.cppPaddleConfigPointer);
    }

    public String summary() {
        return summary(this.cppPaddleConfigPointer);
    }

    public long getCppPaddleConfigPointer() {
        return cppPaddleConfigPointer;
    }

    public String getModelFile() {
        return modelFile;
    }

    public String getParamsFile() {
        return paramsFile;
    }

    public String getModelDir() {
        return modelDir;
    }

    public String getProgFile() {
        return progFile;
    }

    public int getMathThreadsNum() {
        return mathThreadsNum;
    }

    public void resetCppPaddleConfigPointer() {
        cppPaddleConfigPointer = 0;
    }

    private native void cppConfigDestroy(long cppPaddleConfigPointer);

    // 1. create Config
    private native long createCppConfig();
    private native boolean isCppConfigValid(long cppPaddleConfigPointer);

    // 2. not combined model settings
    private native void setCppModel(long cppPaddleConfigPointer, String modelFile, String paramsFile);

    // 3. combined model settings
    private native void setCppModelDir(long cppPaddleConfigPointer, String modelDir);
    private native void setCppProgFile(long cppPaddleConfigPointer, String modelFile);
    private native void setCppParamsFile(long cppPaddleConfigPointer, String paramsFile);
    private native String modelDir(long cppPaddleConfigPointer);
    private native String progFile(long cppPaddleConfigPointer);
    private native String paramsFile(long cppPaddleConfigPointer);

    // 4. cpu settings
    private native void setCpuMathLibraryNumThreads(long cppPaddleConfigPointer, int mathThreadsNum);
    private native int cpuMathLibraryNumThreads(long cppPaddleConfigPointer);

    // 5. MKLDNN settings
    private native void enableMKLDNN(long cppPaddleConfigPointer);
    private native boolean mkldnnEnabled(long cppPaddleConfigPointer);
    private native void enableMkldnnBfloat16(long cppPaddleConfigPointer);
    private native boolean mkldnnBfloat16Enabled(long cppPaddleConfigPointer);

    // 6. gpu setting
    // Known limitation: Java has no uint64, so long is used instead;
    // if memorySize is very large, the Java long becomes negative.
    private native void enableUseGpu(long cppPaddleConfigPointer, long memorySize, int deviceId);
    private native void disableGpu(long cppPaddleConfigPointer);
    private native boolean useGpu(long cppPaddleConfigPointer);
    private native int gpuDeviceId(long cppPaddleConfigPointer);
    private native int memoryPoolInitSizeMb(long cppPaddleConfigPointer);
    private native float fractionOfGpuMemoryForPool(long cppPaddleConfigPointer);

    // 7. TensorRT use To Do
    // 8. optim setting
    private native void switchIrOptim(long cppPaddleConfigPointer, boolean flag);
    private native boolean irOptim(long cppPaddleConfigPointer);
    private native void switchIrDebug(long cppPaddleConfigPointer, boolean flag);

    // 9. enable memory optimization
    private native void enableMemoryOptim(long cppPaddleConfigPointer, boolean flag);
    private native boolean memoryOptimEnabled(long cppPaddleConfigPointer);

    // 10. profile setting
    private native void enableProfile(long cppPaddleConfigPointer);
    private native boolean profileEnabled(long cppPaddleConfigPointer);

    // 11. log setting
    private native void disableGlogInfo(long cppPaddleConfigPointer);

    // 12. view config configuration
    private native String summary(long cppPaddleConfigPointer);
}
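A short usage sketch of this class follows; the model paths are placeholders, and the GPU call is subject to the uint64-as-long caveat noted in the comment above:

```java
Config config = new Config();
// Model file plus separate params file, as in the readme example.
config.setCppModel("resnet50/inference.pdmodel", "resnet50/inference.pdiparams");

// CPU setup: math-library thread count plus MKLDNN (oneDNN) acceleration.
config.setCpuMathLibraryNumThreads(4);
config.enableMKLDNN();

// Or GPU setup instead: initial memory pool size (passed through as uint64 on
// the C side) and the device id.
// config.enableUseGpu(100, 0);

config.enableMemoryOptim(true);
System.out.println(config.summary());
```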
paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Predictor.java (new file, mode 100644)
package com.baidu.paddle.inference;

public class Predictor {

    private long cppPaddlePredictorPointer;
    private long inputNum;
    private long outputNum;

    public Predictor(Config config) {
        cppPaddlePredictorPointer = createPredictor(config.getCppPaddleConfigPointer());
        config.resetCppPaddleConfigPointer();
        inputNum = getInputNum(cppPaddlePredictorPointer);
        outputNum = getOutputNum(cppPaddlePredictorPointer);
    }

    @Override
    protected void finalize() throws Throwable {
        destroyNativePredictor();
    }

    public static Predictor createPaddlePredictor(Config config) {
        Predictor predictor = new Predictor(config);
        return predictor.cppPaddlePredictorPointer == 0L ? null : predictor;
    }

    public void destroyNativePredictor() {
        if (cppPaddlePredictorPointer != 0) cppPredictorDestroy(cppPaddlePredictorPointer);
        cppPaddlePredictorPointer = 0;
    }

    public String getInputNameById(long id) {
        return getInputNameByIndex(this.cppPaddlePredictorPointer, id);
    }

    public String getOutputNameById(long id) {
        return getOutputNameByIndex(this.cppPaddlePredictorPointer, id);
    }

    public Tensor getInputHandle(String name) {
        long cppTensorPointer = getInputHandleByName(this.cppPaddlePredictorPointer, name);
        return cppTensorPointer == 0 ? null : new Tensor(cppTensorPointer);
    }

    public Tensor getOutputHandle(String name) {
        long cppTensorPointer = getOutputHandleByName(this.cppPaddlePredictorPointer, name);
        return cppTensorPointer == 0 ? null : new Tensor(cppTensorPointer);
    }

    public void clearIntermediateTensor() {
        predictorClearIntermediateTensor(this.cppPaddlePredictorPointer);
    }

    public void tryShrinkMemory() {
        predictorTryShrinkMemory(this.cppPaddlePredictorPointer);
    }

    public boolean run() {
        return runPD(this.cppPaddlePredictorPointer);
    }

    public long getCppPaddlePredictorPointer() {
        return cppPaddlePredictorPointer;
    }

    public long getInputNum() {
        return inputNum;
    }

    public long getOutputNum() {
        return outputNum;
    }

    private native void cppPredictorDestroy(long cppPaddleConfigPointer);
    private native void predictorTryShrinkMemory(long cppPaddleConfigPointer);
    private native void predictorClearIntermediateTensor(long cppPaddleConfigPointer);
    private native long createPredictor(long cppPaddleConfigPointer);
    private native long getInputNum(long cppPaddlePredictorPointer);
    private native long getOutputNum(long cppPaddlePredictorPointer);
    private native String getInputNameByIndex(long cppPaddlePredictorPointer, long index);
    private native String getOutputNameByIndex(long cppPaddlePredictorPointer, long index);
    private native long getInputHandleByName(long cppPaddlePredictorPointer, String name);
    private native long getOutputHandleByName(long cppPaddlePredictorPointer, String name);
    private native boolean runPD(long cppPaddlePredictorPointer);
}
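One behavior worth calling out: the constructor hands the Config's native pointer to `createPredictor` and then calls `config.resetCppPaddleConfigPointer()`, so a Config is effectively consumed by the predictor and should not be reused to build a second one. A small sketch of the intended lifecycle (model paths are placeholders):

```java
Config config = new Config();
config.setCppModel("resnet50/inference.pdmodel", "resnet50/inference.pdiparams");

// createPaddlePredictor returns null if the native predictor could not be created;
// after this call the config's native pointer has been reset to 0.
Predictor predictor = Predictor.createPaddlePredictor(config);
if (predictor == null) {
    throw new RuntimeException("failed to create Paddle predictor");
}

// Inputs would be filled via getInputHandle(...).copyFromCpu(...) as in test.java.
boolean ok = predictor.run();
predictor.tryShrinkMemory();          // optional: release cached memory
predictor.destroyNativePredictor();   // frees the native PD_Predictor explicitly
```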
paddle/fluid/inference/experimental/javaapi/src/main/java/com/baidu/paddle/inference/Tensor.java (new file, mode 100644)
package com.baidu.paddle.inference;

public class Tensor {

    long nativeTensorPointer;

    // constructor
    public Tensor(long nativeTensorPointer) {
        this.nativeTensorPointer = nativeTensorPointer;
    }

    @Override
    protected void finalize() throws Throwable {
        destroyNativeTensor();
    }

    public void destroyNativeTensor() {
        if (nativeTensorPointer != 0) cppTensorDestroy(nativeTensorPointer);
        nativeTensorPointer = 0;
    }

    public void reshape(int dim_num, int[] shape) {
        cppTensorReshape(nativeTensorPointer, dim_num, shape);
    }

    public int getSize() {
        int[] shape = getShape();
        if (shape.length == 0) return 0;
        int size = 1;
        for (int i : shape) size *= i;
        return size;
    }

    public int[] getShape() {
        return cppTensorGetShape(nativeTensorPointer);
    }

    public String getName() {
        return cppTensorGetName(nativeTensorPointer);
    }

    public long getCppPaddleTensorPointer() {
        return nativeTensorPointer;
    }

    public void copyFromCpu(Object obj) {
        if (obj instanceof float[]) {
            cppTensorCopyFromCpuFloat(this.nativeTensorPointer, (float[]) obj);
        } else if (obj instanceof long[]) {
            cppTensorCopyFromCpuLong(this.nativeTensorPointer, (long[]) obj);
        } else if (obj instanceof int[]) {
            cppTensorCopyFromCpuInt(this.nativeTensorPointer, (int[]) obj);
        } else if (obj instanceof byte[]) {
            cppTensorCopyFromCpuByte(this.nativeTensorPointer, (byte[]) obj);
        } else if (obj instanceof boolean[]) {
            cppTensorCopyFromCpuBoolean(this.nativeTensorPointer, (boolean[]) obj);
        }
    }

    public void copyToCpu(Object obj) {
        if (obj instanceof float[]) {
            cppTensorCopyToCpuFloat(this.nativeTensorPointer, (float[]) obj);
        } else if (obj instanceof long[]) {
            cppTensorCopyToCpuLong(this.nativeTensorPointer, (long[]) obj);
        } else if (obj instanceof int[]) {
            cppTensorCopyToCpuInt(this.nativeTensorPointer, (int[]) obj);
        } else if (obj instanceof byte[]) {
            cppTensorCopyToCpuByte(this.nativeTensorPointer, (byte[]) obj);
        } else if (obj instanceof boolean[]) {
            cppTensorCopyToCpuBoolean(this.nativeTensorPointer, (boolean[]) obj);
        }
    }

    private native void cppTensorDestroy(long TensorPointer);
    private native void cppTensorReshape(long tensor, int dim_num, int[] shape);
    private native int[] cppTensorGetShape(long tensor);
    private native String cppTensorGetName(long tensor);
    private native void cppTensorCopyFromCpuFloat(long TensorPointer, float[] data);
    private native void cppTensorCopyFromCpuInt(long TensorPointer, int[] data);
    private native void cppTensorCopyFromCpuLong(long TensorPointer, long[] data);
    private native void cppTensorCopyFromCpuByte(long TensorPointer, byte[] data);
    private native void cppTensorCopyFromCpuBoolean(long TensorPointer, boolean[] data);
    private native void cppTensorCopyToCpuFloat(long TensorPointer, float[] data);
    private native void cppTensorCopyToCpuInt(long TensorPointer, int[] data);
    private native void cppTensorCopyToCpuLong(long TensorPointer, long[] data);
    private native void cppTensorCopyToCpuByte(long TensorPointer, byte[] data);
    private native void cppTensorCopyToCpuBoolean(long TensorPointer, boolean[] data);
}
paddle/fluid/inference/experimental/javaapi/test.java (new file, mode 100644)
import com.baidu.paddle.inference.Predictor;
import com.baidu.paddle.inference.Config;
import com.baidu.paddle.inference.Tensor;

public class test {

    static {
        System.loadLibrary("paddle_inference");
    }

    public static void main(String[] args) {
        Config config = new Config();

        config.setCppModel(args[0], args[1]);
        config.enableMemoryOptim(true);
        config.enableProfile();
        config.enableMKLDNN();

        System.out.println("summary:\n" + config.summary());
        System.out.println("model dir:\n" + config.getCppModelDir());
        System.out.println("prog file:\n" + config.getProgFile());
        System.out.println("params file:\n" + config.getCppParamsFile());

        config.getCpuMathLibraryNumThreads();
        config.getFractionOfGpuMemoryForPool();
        config.switchIrDebug(false);

        System.out.println(config.summary());

        Predictor predictor = Predictor.createPaddlePredictor(config);

        long n = predictor.getInputNum();
        String inNames = predictor.getInputNameById(0);
        Tensor inHandle = predictor.getInputHandle(inNames);

        inHandle.reshape(4, new int[]{1, 3, 224, 224});
        float[] inData = new float[1 * 3 * 224 * 224];
        inHandle.copyFromCpu(inData);

        predictor.run();

        String outNames = predictor.getOutputNameById(0);
        Tensor outHandle = predictor.getOutputHandle(outNames);
        float[] outData = new float[outHandle.getSize()];
        outHandle.copyToCpu(outData);

        predictor.tryShrinkMemory();
        predictor.clearIntermediateTensor();

        System.out.println(outData[0]);
        System.out.println(outData.length);

        outHandle.destroyNativeTensor();
        inHandle.destroyNativeTensor();
        predictor.destroyNativePredictor();

        Config newConfig = new Config();
        newConfig.setCppModelDir("/model_dir");
        newConfig.setCppProgFile("/prog_file");
        newConfig.setCppParamsFile("/param");

        System.out.println("model dir:\n" + newConfig.getCppModelDir());
        System.out.println("prog file:\n" + newConfig.getProgFile());
        System.out.println("params file:\n" + newConfig.getCppParamsFile());

        config.destroyNativeConfig();
    }
}
paddle/fluid/inference/experimental/javaapi/test.sh (new file, mode 100644)
#!/bin/bash
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
library_path=$1

mkldnn_lib=$library_path"/third_party/install/mkldnn/lib"
mklml_lib=$library_path"/third_party/install/mklml/lib"
paddle_inference_lib=$library_path"/paddle/lib"

export LD_LIBRARY_PATH=$mkldnn_lib:$mklml_lib:$paddle_inference_lib:.

javac -cp $CLASSPATH:JavaInference.jar:. test.java
java -cp $CLASSPATH:JavaInference.jar:. test $2 $3