weixin_41840029 / PaddleOCR (forked from PaddlePaddle / PaddleOCR)

Commit 7399cbac
Author: andyjpaddle
Date: Jun 06, 2022
Parent: 18dec7fa

add tipc for cpp infer

Showing 6 changed files, with 41 additions and 29 deletions (+41 -29).

deploy/pdserving/ocr_cpp_client.py  (+21 -8)
test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt  (+2 -2)
test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt  (+2 -2)
test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt  (+2 -2)
test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt  (+2 -2)
test_tipc/test_serving_infer_cpp.sh  (+12 -13)

deploy/pdserving/ocr_cpp_client.py

@@ -22,15 +22,16 @@ import cv2
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from ocr_reader import OCRReader
+import codecs
 client = Client()
 # TODO:load_client need to load more than one client model.
 # this need to figure out some details.
 client.load_client_config(sys.argv[1:])
-client.connect(["127.0.0.1:9293"])
+client.connect(["127.0.0.1:8181"])  # 9293
 import paddle
-test_img_dir = "../../doc/imgs/1.jpg"
+test_img_dir = "../../doc/imgs/"
 ocr_reader = OCRReader(char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")

@@ -62,9 +63,21 @@ for img_file in test_img_list:
     image = cv2_to_base64(image_data)
     res_list = []
     fetch_map = client.predict(feed={"x": image}, fetch=[], batch=True)
-    print(fetch_map)
-    one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
-    for res in one_batch_res:
-        res_list.append(res[0])
-    res = {"res": str(res_list)}
-    print(res)
+    if fetch_map is None:
+        print('no results')
+    else:
+        if "text" in fetch_map:
+            for x in fetch_map["text"]:
+                x = codecs.encode(x)
+                words = base64.b64decode(x).decode('utf-8')
+                res_list.append(words)
+        else:
+            try:
+                one_batch_res = ocr_reader.postprocess(fetch_map, with_score=True)
+                for res in one_batch_res:
+                    res_list.append(res[0])
+            except:
+                print('no results')
+        res = {"res": str(res_list)}
+        print(res)
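
The behavioral change in ocr_cpp_client.py is the result handling: the client now guards against an empty response, decodes the base64-encoded "text" field that the C++ serving pipeline returns, and only falls back to OCRReader.postprocess for the tensor-style response of the Python pipeline. Below is a minimal, self-contained sketch of that logic; parse_fetch_map and the demo dictionary are illustrative stand-ins, not code from the commit, and the exact response layout depends on the serving deployment.

import base64
import codecs


def parse_fetch_map(fetch_map, ocr_reader=None):
    """Turn a serving response dict into a list of recognized strings."""
    res_list = []
    if fetch_map is None:
        print('no results')
        return res_list
    if "text" in fetch_map:
        # Assumed C++ serving shape: a list of base64-encoded UTF-8 strings.
        for x in fetch_map["text"]:
            x = codecs.encode(x)                         # str -> bytes
            words = base64.b64decode(x).decode('utf-8')  # base64 -> text
            res_list.append(words)
    elif ocr_reader is not None:
        # Tensor-style response: let OCRReader.postprocess decode it.
        try:
            for res in ocr_reader.postprocess(fetch_map, with_score=True):
                res_list.append(res[0])
        except Exception:
            print('no results')
    return res_list


if __name__ == "__main__":
    # Hypothetical payload, only for exercising the "text" branch.
    demo = {"text": [base64.b64encode("hello ocr".encode('utf-8')).decode('ascii')]}
    print({"res": str(parse_fetch_map(demo))})   # {'res': "['hello ocr']"}

The codecs.encode call mirrors the original script: it simply converts the base64 string to bytes before base64.b64decode runs.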

test_tipc/configs/ch_PP-OCRv2/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt

@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
-device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg

test_tipc/configs/ch_PP-OCRv3/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt

@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
-device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg

test_tipc/configs/ch_ppocr_mobile_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt

@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
-device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg

test_tipc/configs/ch_ppocr_server_v2.0/model_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt

@@ -13,7 +13,7 @@ trans_model:-m paddle_serving_client.convert
 serving_dir:./deploy/pdserving
 web_service:-m paddle_serving_server.serve
 --op:GeneralDetectionOp GeneralInferOp
---port:9293
-device:gpu
+--port:8181
+--gpu_id:"0"|null
 cpp_client:ocr_cpp_client.py
 --image_dir:../../doc/imgs/1.jpg
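
All four serving configs make the same two-line change: the port moves from 9293 to 8181, and the device:gpu line is replaced by --gpu_id:"0"|null, where "|" lists the settings the test harness sweeps (GPU id 0, or null for the CPU run). Below is a rough Python sketch of this key:value convention, assuming the key is the text before the first colon and "|" separates candidate values, as the shell side does with func_parser_key/func_parser_value; parse_tipc_line is an illustrative helper, not part of the repository.

def parse_tipc_line(line):
    """Split a 'key:value' config line; '|' in the value lists alternatives."""
    key, _, value = line.partition(":")
    return key, value.split("|") if value else []


for raw in ['--port:8181', '--gpu_id:"0"|null', '--op:GeneralDetectionOp GeneralInferOp']:
    key, candidates = parse_tipc_line(raw)
    for candidate in candidates:
        # "null" conventionally means "leave this flag out" (the CPU run here).
        print(key, '->', candidate)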

test_tipc/test_serving_infer_cpp.sh

@@ -47,7 +47,8 @@ op_key=$(func_parser_key "${lines[14]}")
 op_value=$(func_parser_value "${lines[14]}")
 port_key=$(func_parser_key "${lines[15]}")
 port_value=$(func_parser_value "${lines[15]}")
-device_value=$(func_parser_value "${lines[16]}")
+gpu_key=$(func_parser_key "${lines[16]}")
+gpu_value=$(func_parser_value "${lines[16]}")
 cpp_client_py=$(func_parser_value "${lines[17]}")
 image_dir_key=$(func_parser_key "${lines[18]}")
 image_dir_value=$(func_parser_value "${lines[18]}")

@@ -108,8 +109,8 @@ function func_serving(){
     # cpp serving
     unset https_proxy
     unset http_proxy
-    for device in ${device_value[*]}; do
-        if [ ${device} = "cpu" ]; then
+    for gpu_id in ${gpu_value[*]}; do
+        if [ ${gpu_id} = "null" ]; then
            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
                web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} > serving_log_cpu.log &"
            elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then

@@ -132,16 +133,16 @@ function func_serving(){
            eval $cpp_client_cmd
            last_status=${PIPESTATUS[0]}
            status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
-           sleep 5s
+           # sleep 5s
            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
-           ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
-       elif [ ${device} = "gpu" ]; then
+           # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
+       else
            if [ ${model_name} = "ch_PP-OCRv2" ] || [ ${model_name} = "ch_PP-OCRv3" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0" ] || [ ${model_name} = "ch_ppocr_server_v2.0" ]; then
-               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
            elif [ ${model_name} = "ch_PP-OCRv2_det" ] || [ ${model_name} = "ch_PP-OCRv3_det" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_det" ] || [ ${model_name} = "ch_ppocr_server_v2.0_det" ]; then
-               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${det_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
            elif [ ${model_name} = "ch_PP-OCRv2_rec" ] || [ ${model_name} = "ch_PP-OCRv3_rec" ] || [ ${model_name} = "ch_ppocr_mobile_v2.0_rec" ] || [ ${model_name} = "ch_ppocr_server_v2.0_rec" ]; then
-               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} --gpu_id=0 > serving_log_gpu.log &"
+               web_service_cpp_cmd="${python_list[0]} ${web_service_py} --model ${rec_server_value} ${op_key} ${op_value} ${port_key} ${port_value} ${gpu_key} ${gpu_id} > serving_log_gpu.log &"
            fi
            eval $web_service_cpp_cmd
            sleep 5s

@@ -157,11 +158,9 @@ function func_serving(){
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
            status_check $last_status "${cpp_client_cmd}" "${status_log}" "${model_name}"
-           sleep 5s
+           # sleep 5s
            ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
-           ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
-       else
-           echo "Does not support hardware other than CPU and GPU Currently!"
+           # ps ux | grep -i ${web_service_py} | awk '{print $2}' | xargs kill -s 9
        fi
    done
 }
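
The net effect of the script change: the CPU/GPU split is now driven by the gpu_id candidates parsed from the config instead of a separate device list, so a gpu_id of null launches the serving command without any GPU flag (logging to serving_log_cpu.log), while any other id is appended as ${gpu_key} ${gpu_id} (logging to serving_log_gpu.log). Below is a compact Python sketch of that command selection; build_serving_cmd, the python binary, and the model paths are illustrative placeholders, while the flag names and values come from the configs above.

def build_serving_cmd(python_bin, web_service, models, op_key, op_value,
                      port_key, port_value, gpu_key, gpu_id):
    """Mirror the web_service_cpp_cmd branches: add the GPU flag only when gpu_id != 'null'."""
    cmd = [python_bin, web_service, '--model', *models,
           op_key, op_value, port_key, port_value]
    if gpu_id == 'null':
        log_file = 'serving_log_cpu.log'       # CPU run: no GPU flag at all
    else:
        cmd += [gpu_key, gpu_id]               # GPU run: e.g. --gpu_id "0"
        log_file = 'serving_log_gpu.log'
    return ' '.join(cmd) + ' > ' + log_file + ' &'


for gpu_id in ['"0"', 'null']:
    print(build_serving_cmd('python3.7', '-m paddle_serving_server.serve',
                            ['det_serving_dir/', 'rec_serving_dir/'],
                            '--op', 'GeneralDetectionOp GeneralInferOp',
                            '--port', '8181', '--gpu_id', gpu_id))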