PaddlePaddle / PaddleOCR
Commit c2aaed44 (unverified)
Authored on Mar 15, 2022 by xiaoting; committed via GitHub on Mar 15, 2022
Merge pull request #5711 from tink2123/fix_tipc_serving
polish tipc serving
Parents: 9c2c5e80, 30037fc4
Showing 9 changed files with 81 additions and 76 deletions
deploy/pdserving/ocr_reader.py (+51, -0)
deploy/pdserving/web_service_det.py (+1, -51)
deploy/pdserving/web_service_rec.py (+1, -2)
test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (+2, -2)
test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (+1, -1)
test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (+2, -2)
test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt (+2, -2)
test_tipc/prepare.sh (+3, -4)
test_tipc/test_serving.sh (+18, -12)
deploy/pdserving/ocr_reader.py
@@ -433,3 +433,54 @@ class OCRReader(object):
         text = self.label_ops.decode(
             preds_idx, preds_prob, is_remove_duplicate=True)
         return text
+
+
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+import yaml
+
+
+class ArgsParser(ArgumentParser):
+    def __init__(self):
+        super(ArgsParser, self).__init__(
+            formatter_class=RawDescriptionHelpFormatter)
+        self.add_argument("-c", "--config", help="configuration file to use")
+        self.add_argument(
+            "-o", "--opt", nargs='+', help="set configuration options")
+
+    def parse_args(self, argv=None):
+        args = super(ArgsParser, self).parse_args(argv)
+        assert args.config is not None, \
+            "Please specify --config=configure_file_path."
+        args.conf_dict = self._parse_opt(args.opt, args.config)
+        print("args config:", args.conf_dict)
+        return args
+
+    def _parse_helper(self, v):
+        if v.isnumeric():
+            if "." in v:
+                v = float(v)
+            else:
+                v = int(v)
+        elif v == "True" or v == "False":
+            v = (v == "True")
+        return v
+
+    def _parse_opt(self, opts, conf_path):
+        f = open(conf_path)
+        config = yaml.load(f, Loader=yaml.Loader)
+        if not opts:
+            return config
+        for s in opts:
+            s = s.strip()
+            k, v = s.split('=')
+            v = self._parse_helper(v)
+            print(k, v, type(v))
+            cur = config
+            parent = cur
+            for kk in k.split("."):
+                if kk not in cur:
+                    cur[kk] = {}
+                    parent = cur
+                    cur = cur[kk]
+                else:
+                    parent = cur
+                    cur = cur[kk]
+            parent[k.split(".")[-1]] = v
+        return config
\ No newline at end of file
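The ArgsParser added above lets every pdserving entry point accept a YAML config plus dotted "-o key.subkey=value" overrides. Below is a minimal, self-contained sketch of that override behaviour, not part of the commit: the function name and sample keys are illustrative, and it skips the int/float/bool coercion done by _parse_helper.

# Minimal sketch of the dotted-key override logic that ArgsParser._parse_opt
# applies on top of the loaded YAML config. Values stay strings here; the real
# helper also coerces numbers and booleans.
def apply_overrides(config, opts):
    for item in opts:
        key, value = item.strip().split("=")
        node = config
        for part in key.split(".")[:-1]:
            node = node.setdefault(part, {})  # walk/create the nested dicts
        node[key.split(".")[-1]] = value
    return config

cfg = {"op": {"det": {"concurrency": 1}}}
print(apply_overrides(cfg, ["op.det.concurrency=4",
                            "op.det.local_service_conf.devices=0"]))
# {'op': {'det': {'concurrency': '4', 'local_service_conf': {'devices': '0'}}}}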
deploy/pdserving/web_service_det.py
@@ -18,63 +18,13 @@ import numpy as np
 import cv2
 import base64
 # from paddle_serving_app.reader import OCRReader
-from ocr_reader import OCRReader, DetResizeForTest
+from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
 from paddle_serving_app.reader import Sequential, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-import yaml
-from argparse import ArgumentParser, RawDescriptionHelpFormatter
 
 _LOGGER = logging.getLogger()
 
-
-class ArgsParser(ArgumentParser):
-    def __init__(self):
-        super(ArgsParser, self).__init__(
-            formatter_class=RawDescriptionHelpFormatter)
-        self.add_argument("-c", "--config", help="configuration file to use")
-        self.add_argument(
-            "-o", "--opt", nargs='+', help="set configuration options")
-
-    def parse_args(self, argv=None):
-        args = super(ArgsParser, self).parse_args(argv)
-        assert args.config is not None, \
-            "Please specify --config=configure_file_path."
-        args.conf_dict = self._parse_opt(args.opt, args.config)
-        return args
-
-    def _parse_helper(self, v):
-        if v.isnumeric():
-            if "." in v:
-                v = float(v)
-            else:
-                v = int(v)
-        elif v == "True" or v == "False":
-            v = (v == "True")
-        return v
-
-    def _parse_opt(self, opts, conf_path):
-        f = open(conf_path)
-        config = yaml.load(f, Loader=yaml.Loader)
-        if not opts:
-            return config
-        for s in opts:
-            s = s.strip()
-            k, v = s.split('=')
-            v = self._parse_helper(v)
-            print(k, v, type(v))
-            cur = config
-            parent = cur
-            for kk in k.split("."):
-                if kk not in cur:
-                    cur[kk] = {}
-                    parent = cur
-                    cur = cur[kk]
-                else:
-                    parent = cur
-                    cur = cur[kk]
-            parent[k.split(".")[-1]] = v
-        return config
-
-
 class DetOp(Op):
     def init_op(self):
deploy/pdserving/web_service_rec.py
@@ -18,10 +18,9 @@ import numpy as np
 import cv2
 import base64
 # from paddle_serving_app.reader import OCRReader
-from ocr_reader import OCRReader, DetResizeForTest
+from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
 from paddle_serving_app.reader import Sequential, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
-from web_service_det import ArgsParser
 
 _LOGGER = logging.getLogger()
test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -8,8 +8,8 @@ trans_model:-m paddle_serving_client.convert
 --serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
 --serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
 serving_dir:./deploy/pdserving
-web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
-op.det.local_service_conf.devices:null|0
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
+op.det.local_service_conf.devices:"0"|null
 op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
-op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.devices:"0"|null
 op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -9,10 +9,10 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
-op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.devices:"0"|null
 op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
 pipline:pipeline_rpc_client.py|pipeline_http_client.py
---image_dir:../../doc/imgs_words_en
\ No newline at end of file
+--image_dir:../../doc/imgs
\ No newline at end of file
test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 ===========================serving_params===========================
 model_name:ocr_rec_server
-python:python3.7
+python:python3.7|cpp
 trans_model:-m paddle_serving_client.convert
 --dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
 --model_filename:inference.pdmodel
@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
-op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.devices:"0"|null
 op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
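For context on the four config edits above: each "key:value1|value2" line in a TIPC config file lists the alternatives the test harness sweeps over, so op.det.local_service_conf.devices:"0"|null now exercises GPU 0 first and then CPU. A rough Python illustration of that convention follows; the real test_tipc scripts parse these lines in shell, so the names below are only for explanation.

# Illustrative parser for the TIPC "key:value1|value2" sweep convention.
def parse_tipc_line(line):
    key, _, values = line.partition(":")
    return key, values.split("|")

key, candidates = parse_tipc_line('op.det.local_service_conf.devices:"0"|null')
for devices in candidates:
    # "0" -> launch the web service on GPU 0; null -> launch it on CPU
    print(f"run serving test with {key}={devices}")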
test_tipc/prepare.sh
@@ -308,10 +308,9 @@ if [ ${MODE} = "serving_infer" ];then
     IFS='|'
     array=(${python_name_list})
     python_name=${array[0]}
-    wget -nc https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
-    ${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
-    ${python_name} -m pip install paddle_serving_client==0.6.1
-    ${python_name} -m pip install paddle-serving-app==0.6.3
+    ${python_name} -m pip install paddle-serving-server-gpu==0.8.3.post101
+    ${python_name} -m pip install paddle_serving_client==0.8.3
+    ${python_name} -m pip install paddle-serving-app==0.8.3
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
test_tipc/test_serving.sh
@@ -58,29 +58,32 @@ function func_serving(){
     trans_model_cmd="${python_list[0]} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
     eval $trans_model_cmd
     cd ${serving_dir_value}
     echo $PWD
     unset https_proxy
     unset http_proxy
     for python in ${python_list[*]}; do
         if [ ${python} = "cpp" ]; then
             for use_gpu in ${web_use_gpu_list[*]}; do
                 if [ ${use_gpu} = "null" ]; then
-                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
-                    eval $web_service_cmd
+                    web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
+                    eval $web_service_cpp_cmd
+                    last_status=${PIPESTATUS[0]}
+                    status_check $last_status "${web_service_cpp_cmd}" "${status_log}"
                     sleep 2s
                     _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
-                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                     eval $pipeline_cmd
                     last_status=${PIPESTATUS[0]}
                     status_check $last_status "${pipeline_cmd}" "${status_log}"
                     sleep 2s
                     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                 else
-                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
-                    eval $web_service_cmd
+                    web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
+                    eval $web_service_cpp_cmd
                     sleep 2s
                     _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
-                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                     eval $pipeline_cmd
                     last_status=${PIPESTATUS[0]}
                     status_check $last_status "${pipeline_cmd}" "${status_log}"
                     sleep 2s
                     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -89,13 +92,14 @@ function func_serving(){
         else
             # python serving
             for use_gpu in ${web_use_gpu_list[*]}; do
                 echo ${ues_gpu}
                 if [ ${use_gpu} = "null" ]; then
                     for use_mkldnn in ${web_use_mkldnn_list[*]}; do
                         for threads in ${web_cpu_threads_list[*]}; do
                             set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
-                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
+                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                             eval $web_service_cmd
+                            last_status=${PIPESTATUS[0]}
+                            status_check $last_status "${web_service_cmd}" "${status_log}"
                             sleep 2s
                             for pipeline in ${pipeline_py[*]}; do
                                 _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
@@ -128,6 +132,8 @@ function func_serving(){
                             set_precision=$(func_set_params "${web_precision_key}" "${precision}")
                             web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
                             eval $web_service_cmd
+                            last_status=${PIPESTATUS[0]}
+                            status_check $last_status "${web_service_cmd}" "${status_log}"
                             sleep 2s
                             for pipeline in ${pipeline_py[*]}; do
@@ -151,15 +157,15 @@ function func_serving(){
 }
-# set cuda device
+#set cuda device
 GPUID=$2
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
 set CUDA_VISIBLE_DEVICES
 eval $env
 echo $env
 echo "################### run test ###################"
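The main behavioural change in test_serving.sh is that launching the web service is now followed by a status check, just as the client pipeline already was. As a rough analogue of that run-and-record pattern (status_check itself is a shell helper elsewhere in test_tipc; the Python function and log path below are illustrative only):

import subprocess

# Illustrative analogue of `eval $cmd; last_status=${PIPESTATUS[0]};
# status_check $last_status "$cmd" "$status_log"` from test_serving.sh.
def run_and_check(cmd, status_log="serving_status.log"):
    result = subprocess.run(cmd, shell=True)  # mirror of `eval $cmd`
    outcome = "successfully" if result.returncode == 0 else "failed"
    with open(status_log, "a") as log:
        log.write(f"Run {outcome} with command - {cmd}!\n")
    return result.returncode

run_and_check("echo demo web service launch &")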