weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)

Commit 44406c51
Authored by LKKlein on Nov 03, 2020
Parent commit: b9e79906

add paddle_c_api.h

Showing 3 changed files with 292 additions and 3 deletions (+292 −3):
deploy/paddleocr-go/README.md (+1 −2)
deploy/paddleocr-go/paddle/common.go (+1 −1)
deploy/paddleocr-go/paddle_c/include/paddle_c_api.h (+290 −0)
deploy/paddleocr-go/README.md

@@ -179,12 +179,11 @@ build/fluid_inference_c_install_dir
```
└── version.txt
```

Removed:

Here, `paddle` holds the C-language inference API of the Paddle library, and `version.txt` contains the version information of the current inference library. Finally, add the C inference library and its header files to the environment variables.

Added:

Here, `paddle` holds the C-language inference API of the Paddle library, and `version.txt` contains the version information of the current inference library. Finally, add the C inference library to the environment variables.
```shell
echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$PADDLE_ROOT/build/fluid_inference_c_install_dir/paddle/lib" >> ~/.bashrc
echo "export LIBRARY_PATH=$LIBRARY_PATH:$PADDLE_ROOT/build/fluid_inference_c_install_dir/paddle/lib" >> ~/.bashrc
echo "export C_INCLUDE_PATH=$C_INCLUDE_PATH:$PADDLE_ROOT/build/fluid_inference_c_install_dir/paddle/include" >> ~/.bashrc
source ~/.bashrc
```
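After reloading the shell, a quick way to confirm that both the header and libpaddle_fluid_c are reachable is to build a throw-away C program against them. This is a minimal sketch, not part of the commit; the file name and build command are illustrative, and it only exercises config creation from the C API added later in this commit.

```c
/* check_paddle_c.c - illustrative only, not part of this commit.
 * Build (assuming the exports above have been sourced):
 *   gcc check_paddle_c.c -lpaddle_fluid_c -o check_paddle_c
 */
#include <stdbool.h>       /* paddle_c_api.h uses bool but does not include stdbool.h itself */
#include <stdio.h>
#include <paddle_c_api.h>  /* found via the C_INCLUDE_PATH exported above */

int main(void) {
  /* Creating and deleting an empty analysis config is enough to prove that
   * the header resolves and the shared library links and loads. */
  PD_AnalysisConfig* config = PD_NewAnalysisConfig();
  if (config == NULL) {
    fprintf(stderr, "PD_NewAnalysisConfig failed\n");
    return 1;
  }
  PD_DeleteAnalysisConfig(config);
  printf("Paddle C inference API is reachable\n");
  return 0;
}
```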
deploy/paddleocr-go/paddle/common.go

```diff
 package paddle

-// #cgo CFLAGS: -I../paddle_c/paddle/include
+// #cgo CFLAGS: -I../paddle_c/include
 // #cgo LDFLAGS: -lpaddle_fluid_c
 // #include <stdbool.h>
 import "C"
```
deploy/paddleocr-go/paddle_c/include/paddle_c_api.h (new file, mode 100644)

```c
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#if defined(_WIN32)
#ifdef PADDLE_ON_INFERENCE
#define PADDLE_CAPI_EXPORT __declspec(dllexport)
#else
#define PADDLE_CAPI_EXPORT __declspec(dllimport)
#endif  // PADDLE_ON_INFERENCE
#else
#define PADDLE_CAPI_EXPORT __attribute__((visibility("default")))
#endif  // _WIN32

#ifdef __cplusplus
extern "C" {
#endif

enum PD_DataType { PD_FLOAT32, PD_INT32, PD_INT64, PD_UINT8, PD_UNKDTYPE };

typedef enum PD_DataType PD_DataType;

typedef struct PD_PaddleBuf PD_PaddleBuf;
typedef struct PD_AnalysisConfig PD_AnalysisConfig;
typedef struct PD_Predictor PD_Predictor;

typedef struct PD_Buffer {
  void* data;
  size_t length;
  size_t capacity;
} PD_Buffer;

typedef struct PD_ZeroCopyTensor {
  PD_Buffer data;
  PD_Buffer shape;
  PD_Buffer lod;
  PD_DataType dtype;
  char* name;
} PD_ZeroCopyTensor;

PADDLE_CAPI_EXPORT extern PD_ZeroCopyTensor* PD_NewZeroCopyTensor();
PADDLE_CAPI_EXPORT extern void PD_DeleteZeroCopyTensor(PD_ZeroCopyTensor*);
PADDLE_CAPI_EXPORT extern void PD_InitZeroCopyTensor(PD_ZeroCopyTensor*);
PADDLE_CAPI_EXPORT extern void PD_DestroyZeroCopyTensor(PD_ZeroCopyTensor*);
PADDLE_CAPI_EXPORT extern void PD_DeleteZeroCopyTensor(PD_ZeroCopyTensor*);

typedef struct PD_ZeroCopyData {
  char* name;
  void* data;
  PD_DataType dtype;
  int* shape;
  int shape_size;
} PD_ZeroCopyData;

typedef struct InTensorShape {
  char* name;
  int* tensor_shape;
  int shape_size;
} InTensorShape;

PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_NewPaddleBuf();
PADDLE_CAPI_EXPORT extern void PD_DeletePaddleBuf(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void PD_PaddleBufResize(PD_PaddleBuf* buf, size_t length);
PADDLE_CAPI_EXPORT extern void PD_PaddleBufReset(PD_PaddleBuf* buf, void* data, size_t length);
PADDLE_CAPI_EXPORT extern bool PD_PaddleBufEmpty(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void* PD_PaddleBufData(PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern size_t PD_PaddleBufLength(PD_PaddleBuf* buf);

// PaddleTensor
typedef struct PD_Tensor PD_Tensor;

PADDLE_CAPI_EXPORT extern PD_Tensor* PD_NewPaddleTensor();
PADDLE_CAPI_EXPORT extern void PD_DeletePaddleTensor(PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorName(PD_Tensor* tensor, char* name);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorDType(PD_Tensor* tensor, PD_DataType dtype);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorData(PD_Tensor* tensor, PD_PaddleBuf* buf);
PADDLE_CAPI_EXPORT extern void PD_SetPaddleTensorShape(PD_Tensor* tensor, int* shape, int size);
PADDLE_CAPI_EXPORT extern const char* PD_GetPaddleTensorName(const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern PD_DataType PD_GetPaddleTensorDType(const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_GetPaddleTensorData(const PD_Tensor* tensor);
PADDLE_CAPI_EXPORT extern const int* PD_GetPaddleTensorShape(const PD_Tensor* tensor, int* size);

// AnalysisPredictor
PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config, PD_Tensor* inputs,
                                               int in_size, PD_Tensor** output_data, int* out_size,
                                               int batch_size);

PADDLE_CAPI_EXPORT extern bool PD_PredictorZeroCopyRun(const PD_AnalysisConfig* config,
                                                       PD_ZeroCopyData* inputs, int in_size,
                                                       PD_ZeroCopyData** output, int* out_size);

// AnalysisConfig
enum Precision { kFloat32 = 0, kInt8, kHalf };
typedef enum Precision Precision;

PADDLE_CAPI_EXPORT extern PD_AnalysisConfig* PD_NewAnalysisConfig();
PADDLE_CAPI_EXPORT extern void PD_DeleteAnalysisConfig(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetModel(PD_AnalysisConfig* config, const char* model_dir,
                                           const char* params_path);
PADDLE_CAPI_EXPORT extern void PD_SetProgFile(PD_AnalysisConfig* config, const char* x);
PADDLE_CAPI_EXPORT extern void PD_SetParamsFile(PD_AnalysisConfig* config, const char* x);
PADDLE_CAPI_EXPORT extern void PD_SetOptimCacheDir(PD_AnalysisConfig* config, const char* opt_cache_dir);
PADDLE_CAPI_EXPORT extern const char* PD_ModelDir(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern const char* PD_ProgFile(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern const char* PD_ParamsFile(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableUseGpu(PD_AnalysisConfig* config, int memory_pool_init_size_mb,
                                               int device_id);
PADDLE_CAPI_EXPORT extern void PD_DisableGpu(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_UseGpu(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern int PD_GpuDeviceId(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern int PD_MemoryPoolInitSizeMb(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern float PD_FractionOfGpuMemoryForPool(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableCUDNN(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_CudnnEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchIrOptim(PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern bool PD_IrOptim(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchUseFeedFetchOps(PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern bool PD_UseFeedFetchOpsEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SwitchSpecifyInputNames(PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern bool PD_SpecifyInputName(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableTensorRtEngine(PD_AnalysisConfig* config, int workspace_size,
                                                       int max_batch_size, int min_subgraph_size,
                                                       Precision precision, bool use_static,
                                                       bool use_calib_mode);
PADDLE_CAPI_EXPORT extern bool PD_TensorrtEngineEnabled(const PD_AnalysisConfig* config);

typedef struct PD_MaxInputShape {
  char* name;
  int* shape;
  int shape_size;
} PD_MaxInputShape;

PADDLE_CAPI_EXPORT extern void PD_SwitchIrDebug(PD_AnalysisConfig* config, bool x);
PADDLE_CAPI_EXPORT extern void PD_EnableMKLDNN(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetMkldnnCacheCapacity(PD_AnalysisConfig* config, int capacity);
PADDLE_CAPI_EXPORT extern bool PD_MkldnnEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetCpuMathLibraryNumThreads(PD_AnalysisConfig* config,
                                                              int cpu_math_library_num_threads);
PADDLE_CAPI_EXPORT extern int PD_CpuMathLibraryNumThreads(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableMkldnnQuantizer(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_MkldnnQuantizerEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetModelBuffer(PD_AnalysisConfig* config, const char* prog_buffer,
                                                 size_t prog_buffer_size, const char* params_buffer,
                                                 size_t params_buffer_size);
PADDLE_CAPI_EXPORT extern bool PD_ModelFromMemory(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableMemoryOptim(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_MemoryOptimEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_EnableProfile(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_ProfileEnabled(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_SetInValid(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern bool PD_IsValid(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DisableGlogInfo(PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DeletePass(PD_AnalysisConfig* config, char* pass_name);

PADDLE_CAPI_EXPORT extern PD_Predictor* PD_NewPredictor(const PD_AnalysisConfig* config);
PADDLE_CAPI_EXPORT extern void PD_DeletePredictor(PD_Predictor* predictor);
PADDLE_CAPI_EXPORT extern int PD_GetInputNum(const PD_Predictor*);
PADDLE_CAPI_EXPORT extern int PD_GetOutputNum(const PD_Predictor*);
PADDLE_CAPI_EXPORT extern const char* PD_GetInputName(const PD_Predictor*, int);
PADDLE_CAPI_EXPORT extern const char* PD_GetOutputName(const PD_Predictor*, int);
PADDLE_CAPI_EXPORT extern void PD_SetZeroCopyInput(PD_Predictor* predictor,
                                                   const PD_ZeroCopyTensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_GetZeroCopyOutput(PD_Predictor* predictor, PD_ZeroCopyTensor* tensor);
PADDLE_CAPI_EXPORT extern void PD_ZeroCopyRun(PD_Predictor* predictor);

#ifdef __cplusplus
}  // extern "C"
#endif
```
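To show how the pieces above fit together, the sketch below walks the zero-copy path that paddleocr-go drives through cgo: build a config, create a predictor, bind an input tensor, run, and read an output. It is illustrative only; the model paths, the 1x3x32x32 input shape, and the assumption that PD_Buffer.length counts bytes are placeholders to be checked against the Paddle inference documentation.

```c
/* zero_copy_sketch.c - illustrative only, not part of this commit.
 * Model paths, the input shape, and PD_Buffer length conventions are assumptions. */
#include <stdbool.h>
#include <stdlib.h>
#include <paddle_c_api.h>

int main(void) {
  /* 1. Configure a CPU predictor for an exported inference model (placeholder paths). */
  PD_AnalysisConfig* config = PD_NewAnalysisConfig();
  PD_SetModel(config, "./inference/det/model", "./inference/det/params");
  PD_DisableGpu(config);
  PD_SwitchUseFeedFetchOps(config, false);   /* zero-copy tensors need feed/fetch ops disabled */
  PD_SwitchSpecifyInputNames(config, true);
  PD_SwitchIrOptim(config, true);

  /* 2. Create the predictor and look up its first input name. */
  PD_Predictor* predictor = PD_NewPredictor(config);
  const char* input_name = PD_GetInputName(predictor, 0);

  /* 3. Bind a dummy 1x3x32x32 float input as a zero-copy tensor.
   *    Assumption: PD_Buffer.length is the buffer size in bytes. */
  int shape[4] = {1, 3, 32, 32};
  float* pixels = (float*)calloc(1 * 3 * 32 * 32, sizeof(float));

  PD_ZeroCopyTensor input;
  PD_InitZeroCopyTensor(&input);
  input.name = (char*)input_name;
  input.dtype = PD_FLOAT32;
  input.shape.data = shape;
  input.shape.length = sizeof(shape);
  input.data.data = pixels;
  input.data.length = 1 * 3 * 32 * 32 * sizeof(float);
  PD_SetZeroCopyInput(predictor, &input);

  /* 4. Run inference and fetch the first output; the library fills the buffers. */
  PD_ZeroCopyRun(predictor);

  PD_ZeroCopyTensor output;
  PD_InitZeroCopyTensor(&output);
  output.name = (char*)PD_GetOutputName(predictor, 0);
  PD_GetZeroCopyOutput(predictor, &output);
  /* output.data / output.shape now describe the result. */

  /* 5. Release library-owned output buffers, then the predictor and config. */
  PD_DestroyZeroCopyTensor(&output);
  free(pixels);
  PD_DeletePredictor(predictor);
  PD_DeleteAnalysisConfig(config);
  return 0;
}
```

In deploy/paddleocr-go the same calls are made from Go rather than C, through the cgo preamble in deploy/paddleocr-go/paddle/common.go shown above.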