PaddlePaddle / PaddleOCR

Commit 0f5a5d96 (unverified)
Authored Sep 07, 2021 by MissPenguin; committed by GitHub on Sep 07, 2021
Merge pull request #3656 from tink2123/service_for_ci
Service for ci
Parents: 5564e1e6, e0e7cdb1
Showing 6 changed files with 311 additions and 5 deletions (+311 -5)
deploy/pdserving/web_service_det.py   +77 -0
deploy/pdserving/web_service_rec.py   +86 -0
tests/ocr_det_params.txt              +15 -0
tests/ocr_rec_params.txt              +3 -3
tests/prepare.sh                      +15 -2
tests/test.sh                         +115 -0
deploy/pdserving/web_service_det.py (new file, 0 → 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op

import logging
import numpy as np
import cv2
import base64
# from paddle_serving_app.reader import OCRReader
from ocr_reader import OCRReader, DetResizeForTest
from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose
from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes

_LOGGER = logging.getLogger()


class DetOp(Op):
    def init_op(self):
        self.det_preprocess = Sequential([
            DetResizeForTest(), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
            Transpose((2, 0, 1))
        ])
        self.filter_func = FilterBoxes(10, 10)
        self.post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        data = base64.b64decode(input_dict["image"].encode('utf8'))
        self.raw_im = data
        data = np.fromstring(data, np.uint8)
        # Note: class variables(self.var) can only be used in process op mode
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        self.ori_h, self.ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, self.new_h, self.new_w = det_img.shape
        return {"x": det_img[np.newaxis, :].copy()}, False, None, ""

    def postprocess(self, input_dicts, fetch_dict, log_id):
        det_out = fetch_dict["save_infer_model/scale_0.tmp_1"]
        ratio_list = [
            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
        ]
        dt_boxes_list = self.post_func(det_out, [ratio_list])
        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
        out_dict = {"dt_boxes": str(dt_boxes)}
        return out_dict, None, ""


class OcrService(WebService):
    def get_pipeline_response(self, read_op):
        det_op = DetOp(name="det", input_ops=[read_op])
        return det_op


uci_service = OcrService(name="ocr")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
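For reference, a minimal sketch of how this det pipeline service can be queried over HTTP, modeled on deploy/pdserving/pipeline_http_client.py. The port (9998 here) and the /ocr/prediction route are assumptions derived from config.yml and OcrService(name="ocr") above, so verify both against your config; the image path is likewise only an example.

# Hedged client sketch: port 9998 and the image path are assumptions;
# check config.yml and pick any image under doc/imgs.
import base64
import json
import requests

url = "http://127.0.0.1:9998/ocr/prediction"
with open("../../doc/imgs/1.jpg", "rb") as f:
    # DetOp.preprocess() b64decodes input_dict["image"], so the client sends
    # the raw image file bytes base64-encoded under the "image" key
    image = base64.b64encode(f.read()).decode("utf8")

data = {"key": ["image"], "value": [image]}
r = requests.post(url=url, data=json.dumps(data))
print(r.json())  # response carries the stringified "dt_boxes" from postprocess()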
deploy/pdserving/web_service_rec.py (new file, 0 → 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op

import logging
import numpy as np
import cv2
import base64
# from paddle_serving_app.reader import OCRReader
from ocr_reader import OCRReader, DetResizeForTest
from paddle_serving_app.reader import Sequential, ResizeByFactor
from paddle_serving_app.reader import Div, Normalize, Transpose

_LOGGER = logging.getLogger()


class RecOp(Op):
    def init_op(self):
        self.ocr_reader = OCRReader(
            char_dict_path="../../ppocr/utils/ppocr_keys_v1.txt")

    def preprocess(self, input_dicts, data_id, log_id):
        (_, input_dict), = input_dicts.items()
        raw_im = base64.b64decode(input_dict["image"].encode('utf8'))
        data = np.fromstring(raw_im, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        feed_list = []
        max_wh_ratio = 0
        ## Many mini-batchs, the type of feed_data is list.
        max_batch_size = 6  # len(dt_boxes)

        # If max_batch_size is 0, skipping predict stage
        if max_batch_size == 0:
            return {}, True, None, ""
        boxes_size = max_batch_size
        rem = boxes_size % max_batch_size

        h, w = im.shape[0:2]
        wh_ratio = w * 1.0 / h
        max_wh_ratio = max(max_wh_ratio, wh_ratio)
        _, w, h = self.ocr_reader.resize_norm_img(im, max_wh_ratio).shape
        norm_img = self.ocr_reader.resize_norm_img(im, max_batch_size)
        norm_img = norm_img[np.newaxis, :]
        feed = {"x": norm_img.copy()}
        feed_list.append(feed)
        return feed_list, False, None, ""

    def postprocess(self, input_dicts, fetch_data, log_id):
        res_list = []
        if isinstance(fetch_data, dict):
            if len(fetch_data) > 0:
                rec_batch_res = self.ocr_reader.postprocess(
                    fetch_data, with_score=True)
                for res in rec_batch_res:
                    res_list.append(res[0])
        elif isinstance(fetch_data, list):
            for one_batch in fetch_data:
                one_batch_res = self.ocr_reader.postprocess(
                    one_batch, with_score=True)
                for res in one_batch_res:
                    res_list.append(res[0])

        res = {"res": str(res_list)}
        return res, None, ""


class OcrService(WebService):
    def get_pipeline_response(self, read_op):
        rec_op = RecOp(name="rec", input_ops=[read_op])
        return rec_op


uci_service = OcrService(name="ocr")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
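The rec service accepts the same request format as the det service above (base64-encoded image bytes under the "image" key). The trailing values in the return statements are the pipeline framework's control fields; a hedged reading of the convention as this file uses it:

# Return conventions as used by RecOp above (inferred from this file; consult
# the paddle_serving pipeline documentation for the authoritative signature):
#
#   preprocess  -> (feed_data, is_skip_process, prod_errcode, prod_errinfo)
#     `return feed_list, False, None, ""` runs the predict stage, while the
#     max_batch_size == 0 branch returns `{}, True, None, ""` to skip it.
#
#   postprocess -> (fetch_dict, prod_errcode, prod_errinfo)
#     `return res, None, ""` reports success with the recognized texts.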
tests/ocr_det_params.txt
...
@@ -64,4 +64,19 @@ inference:./deploy/cpp_infer/build/ppocr det
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 --save_log_path:null
 --benchmark:True
+===========================serving_params===========================
+trans_model:-m paddle_serving_client.convert
+--dirname:./inference/ch_ppocr_mobile_v2.0_det_infer/
+--model_filename:inference.pdmodel
+--params_filename:inference.pdiparams
+--serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
+--serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
+serving_dir:./deploy/pdserving
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
+op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.use_mkldnn:True|False
+op.det.local_service_conf.thread_num:1|6
+op.det.local_service_conf.use_trt:False|True
+op.det.local_service_conf.precision:fp32|fp16|int8
+pipline:pipeline_http_client.py --image_dir=../../doc/imgs
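Each entry above is a key:value pair, with | separating alternative values for test.sh to sweep. A simplified sketch of how the parser helpers referenced throughout tests/test.sh split these lines (approximation only; the real func_parser_key/func_parser_value definitions live at the top of that script):

# Approximate behavior of the params-file parsers used by tests/test.sh.
function func_parser_key(){
    local IFS=":"
    local array=($1)
    echo ${array[0]}    # text before the first ':'
}
function func_parser_value(){
    local IFS=":"
    local array=($1)
    echo ${array[1]}    # text after the ':', possibly '|'-separated options
}

func_parser_key "op.det.local_service_conf.devices:null|0"    # -> op.det.local_service_conf.devices
func_parser_value "op.det.local_service_conf.devices:null|0"  # -> null|0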
tests/ocr_rec_params.txt
 ===========================train_params===========================
 model_name:ocr_rec
 python:python3.7
-gpu_list:0|2,3
+gpu_list:0|0,1
 Global.use_gpu:True|True
 Global.auto_cast:null
 Global.epoch_num:lite_train_infer=2|whole_train_infer=300
...
@@ -9,7 +9,7 @@ Global.save_model_dir:./output/
 Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128
 Global.pretrained_model:null
 train_model_name:latest
-train_infer_img_dir:./train_data/ic15_data/train
+train_infer_img_dir:./inference/rec_inference
 null:null
 ##
 trainer:norm_train|pact_train
...
@@ -41,7 +41,7 @@ inference:tools/infer/predict_rec.py
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
---rec_batch_num:1
+--rec_batch_num:1|6
 --use_tensorrt:True|False
 --precision:fp32|fp16|int8
 --rec_model_dir:
...
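These edits widen the sweep: gpu_list:0|0,1 now covers single- and dual-GPU runs, and --rec_batch_num:1|6 runs prediction at two batch sizes. For illustration, this is how a |-separated value fans out into one run per option (the loops added to tests/test.sh below follow the same IFS pattern):

# Illustration only: expanding a '|'-separated option list into runs.
IFS='|'
rec_batch_num_list="1|6"
for batch_num in ${rec_batch_num_list[*]}; do
    # one command line per swept value: batch size 1, then 6
    echo "tools/infer/predict_rec.py --rec_batch_num=${batch_num}"
done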
tests/prepare.sh
...
@@ -40,11 +40,13 @@ if [ ${MODE} = "lite_train_infer" ];then
     rm -rf ./train_data/ic15_data
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar
     # todo change to bcebos
+    wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
     wget -nc -P ./deploy/slim/prune https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle
     cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar
     ln -s ./icdar2015_lite ./icdar2015
     cd ../
+    cd ./inference && tar xf rec_inference.tar && cd ../
 elif [ ${MODE} = "whole_train_infer" ];then
     wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams
     rm -rf ./train_data/icdar2015
...
@@ -80,12 +82,23 @@ elif [ ${MODE} = "infer" ] || [ ${MODE} = "cpp_infer" ];then
     else
         rm -rf ./train_data/ic15_data
         eval_model_name="ch_ppocr_mobile_v2.0_rec_infer"
-        wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar
+        wget -nc -P ./inference/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar
         wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
-        cd ./inference && tar xf ${eval_model_name}.tar && tar xf ic15_data.tar && cd ../
+        cd ./inference && tar xf ${eval_model_name}.tar && tar xf rec_inference.tar && cd ../
     fi
 fi
+
+# prepare serving env
+python_name=$(func_parser_value "${lines[2]}")
+wget https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
+${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
+${python_name} -m pip install paddle_serving_client==0.6.1
+${python_name} -m pip install paddle-serving-app==0.6.3
+wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
+wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
+cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
+
 if [ ${MODE} = "cpp_infer" ];then
     cd deploy/cpp_infer
     use_opencv=$(func_parser_value "${lines[52]}")
...
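With the environment prepared, the serving test presumably runs like the other modes, passing the params file and a mode string (hypothetical invocation, assuming these scripts keep their existing FILENAME/MODE argument convention; the third test.sh argument is the GPU id consumed by the new serving_infer branch below):

# Hypothetical invocation sketch:
bash tests/prepare.sh tests/ocr_det_params.txt serving_infer    # fetch models, install serving wheels
bash tests/test.sh tests/ocr_det_params.txt serving_infer 0     # run func_serving on GPU 0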
tests/test.sh
...
@@ -144,6 +144,32 @@ benchmark_key=$(func_parser_key "${lines[49]}")
 benchmark_value=$(func_parser_value "${lines[49]}")
 infer_key1=$(func_parser_key "${lines[50]}")
 infer_value1=$(func_parser_value "${lines[50]}")
+
+# parser serving
+trans_model_py=$(func_parser_value "${lines[67]}")
+infer_model_dir_key=$(func_parser_key "${lines[68]}")
+infer_model_dir_value=$(func_parser_value "${lines[68]}")
+model_filename_key=$(func_parser_key "${lines[69]}")
+model_filename_value=$(func_parser_value "${lines[69]}")
+params_filename_key=$(func_parser_key "${lines[70]}")
+params_filename_value=$(func_parser_value "${lines[70]}")
+serving_server_key=$(func_parser_key "${lines[71]}")
+serving_server_value=$(func_parser_value "${lines[71]}")
+serving_client_key=$(func_parser_key "${lines[72]}")
+serving_client_value=$(func_parser_value "${lines[72]}")
+serving_dir_value=$(func_parser_value "${lines[73]}")
+web_service_py=$(func_parser_value "${lines[74]}")
+web_use_gpu_key=$(func_parser_key "${lines[75]}")
+web_use_gpu_list=$(func_parser_value "${lines[75]}")
+web_use_mkldnn_key=$(func_parser_key "${lines[76]}")
+web_use_mkldnn_list=$(func_parser_value "${lines[76]}")
+web_cpu_threads_key=$(func_parser_key "${lines[77]}")
+web_cpu_threads_list=$(func_parser_value "${lines[77]}")
+web_use_trt_key=$(func_parser_key "${lines[78]}")
+web_use_trt_list=$(func_parser_value "${lines[78]}")
+web_precision_key=$(func_parser_key "${lines[79]}")
+web_precision_list=$(func_parser_value "${lines[79]}")
+pipeline_py=$(func_parser_value "${lines[80]}")
 
 if [ ${MODE} = "cpp_infer" ]; then
     # parser cpp inference model
...
@@ -244,6 +270,81 @@ function func_inference(){
         fi
     done
 }
+
+function func_serving(){
+    IFS='|'
+    _python=$1
+    _script=$2
+    _model_dir=$3
+    # pdserving
+    set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
+    set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
+    set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
+    set_serving_server=$(func_set_params "${serving_server_key}" "${serving_server_value}")
+    set_serving_client=$(func_set_params "${serving_client_key}" "${serving_client_value}")
+    trans_model_cmd="${python} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    eval $trans_model_cmd
+    cd ${serving_dir_value}
+    echo $PWD
+    unset https_proxy
+    unset http_proxy
+    for use_gpu in ${web_use_gpu_list[*]}; do
+        echo ${ues_gpu}
+        if [ ${use_gpu} = "null" ]; then
+            for use_mkldnn in ${web_use_mkldnn_list[*]}; do
+                if [ ${use_mkldnn} = "False" ]; then
+                    continue
+                fi
+                for threads in ${web_cpu_threads_list[*]}; do
+                    _save_log_path="${_log_path}/server_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+                    set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
+                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &>${_save_log_path} &"
+                    eval $web_service_cmd
+                    sleep 2s
+                    pipeline_cmd="${python} ${pipeline_py}"
+                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
+                    eval "cat ${_save_log_path}"
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    PID=$!
+                    kill $PID
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                done
+            done
+        elif [ ${use_gpu} = "0" ]; then
+            for use_trt in ${web_use_trt_list[*]}; do
+                for precision in ${web_precision_list[*]}; do
+                    if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
+                    if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then
+                        continue
+                    fi
+                    if [[ ${use_trt} = "Falg_quantse" || ${precision} =~ "int8" ]]; then
+                        continue
+                    fi
+                    _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
+                    set_tensorrt=$(func_set_params "${web_use_trt_key}" "${use_trt}")
+                    set_precision=$(func_set_params "${web_precision_key}" "${precision}")
+                    web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} &>${_save_log_path} & "
+                    eval $web_service_cmd
+                    sleep 2s
+                    pipeline_cmd="${python} ${pipeline_py}"
+                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
+                    eval "cat ${_save_log_path}"
+                    status_check $last_status "${pipeline_cmd}" "${status_log}"
+                    PID=$!
+                    kill $PID
+                    sleep 2s
+                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
+                done
+            done
+        else
+            echo "Does not support hardware other than CPU and GPU Currently!"
+        fi
+    done
+}
 
 function func_cpp_inference(){
     IFS='|'
...
@@ -360,6 +461,20 @@ elif [ ${MODE} = "cpp_infer" ]; then
         func_cpp_inference "${inference_cmd}" "${infer_model}" "${LOG_PATH}" "${cpp_infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
+elif [ ${MODE} = "serving_infer" ]; then
+    GPUID=$3
+    if [ ${#GPUID} -le 0 ]; then
+        env=" "
+    else
+        env="export CUDA_VISIBLE_DEVICES=${GPUID}"
+    fi
+    # set CUDA_VISIBLE_DEVICES
+    eval $env
+    export Count=0
+    IFS="|"
+    # run serving
+    func_serving "${web_service_cmd}"
 else
     IFS="|"
...
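The func_set_params calls inside func_serving turn a key/value pair into a key=value command-line fragment, collapsing to whitespace when either side is null so that optional flags simply vanish from the assembled command. A sketch of that behavior (approximation; the real helper is defined earlier in tests/test.sh):

# Approximate behavior of func_set_params as used by func_serving above.
function func_set_params(){
    local key=$1
    local value=$2
    if [ "${key}" = "null" ] || [ "${value}" = "null" ] || [ -z "${value}" ]; then
        echo " "    # drop the flag entirely
    else
        echo "${key}=${value}"
    fi
}

func_set_params "--dirname" "./inference/ch_ppocr_mobile_v2.0_det_infer/"
# -> --dirname=./inference/ch_ppocr_mobile_v2.0_det_infer/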