PaddlePaddle / PaddleOCR
Commit 455a21ca
Authored Mar 15, 2022 by tink2123
polish tipc serving
Parent: 35798e29
Showing 9 changed files with 85 additions and 75 deletions (+85, -75)
deploy/pdserving/ocr_reader.py  (+51 -0)
deploy/pdserving/web_service_det.py  (+1 -51)
deploy/pdserving/web_service_rec.py  (+3 -3)
test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  (+2 -2)
test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  (+1 -1)
test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  (+2 -2)
test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt  (+2 -2)
test_tipc/prepare.sh  (+3 -4)
test_tipc/test_serving.sh  (+20 -10)
deploy/pdserving/ocr_reader.py
@@ -433,3 +433,54 @@ class OCRReader(object):
         text = self.label_ops.decode(
             preds_idx, preds_prob, is_remove_duplicate=True)
         return text
+
+
+from argparse import ArgumentParser, RawDescriptionHelpFormatter
+import yaml
+
+
+class ArgsParser(ArgumentParser):
+    def __init__(self):
+        super(ArgsParser, self).__init__(
+            formatter_class=RawDescriptionHelpFormatter)
+        self.add_argument("-c", "--config", help="configuration file to use")
+        self.add_argument(
+            "-o", "--opt", nargs='+', help="set configuration options")
+
+    def parse_args(self, argv=None):
+        args = super(ArgsParser, self).parse_args(argv)
+        assert args.config is not None, \
+            "Please specify --config=configure_file_path."
+        args.conf_dict = self._parse_opt(args.opt, args.config)
+        print("args config:", args.conf_dict)
+        return args
+
+    def _parse_helper(self, v):
+        if v.isnumeric():
+            if "." in v:
+                v = float(v)
+            else:
+                v = int(v)
+        elif v == "True" or v == "False":
+            v = (v == "True")
+        return v
+
+    def _parse_opt(self, opts, conf_path):
+        f = open(conf_path)
+        config = yaml.load(f, Loader=yaml.Loader)
+        if not opts:
+            return config
+        for s in opts:
+            s = s.strip()
+            k, v = s.split('=')
+            v = self._parse_helper(v)
+            print(k, v, type(v))
+            cur = config
+            parent = cur
+            for kk in k.split("."):
+                if kk not in cur:
+                    cur[kk] = {}
+                    parent = cur
+                    cur = cur[kk]
+                else:
+                    parent = cur
+                    cur = cur[kk]
+            parent[k.split(".")[-1]] = v
+        return config
\ No newline at end of file
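The class added above centralizes command-line handling for the serving scripts: -c/--config names a YAML file, and each -o key.sub.key=value override is folded into the loaded config by _parse_opt. A minimal usage sketch of that behavior (not part of the commit; it assumes the current directory is deploy/pdserving so that ocr_reader is importable, and demo_config.yml is a throw-away stand-in):

# Minimal usage sketch, not repository code. Assumes deploy/pdserving is the
# working directory so "from ocr_reader import ArgsParser" resolves.
import yaml
from ocr_reader import ArgsParser

# Write a tiny stand-in config so the example is self-contained.
with open("demo_config.yml", "w") as f:
    yaml.dump({"op": {"det": {"concurrency": 1}}}, f)

args = ArgsParser().parse_args(
    ["-c", "demo_config.yml", "-o", "op.det.concurrency=2"])
# The dotted override is folded into the loaded YAML:
# args.conf_dict == {"op": {"det": {"concurrency": 2}}}
print(args.conf_dict)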
deploy/pdserving/web_service_det.py
@@ -18,63 +18,13 @@ import numpy as np
 import cv2
 import base64
 # from paddle_serving_app.reader import OCRReader
-from ocr_reader import OCRReader, DetResizeForTest
+from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
 from paddle_serving_app.reader import Sequential, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-import yaml
-from argparse import ArgumentParser, RawDescriptionHelpFormatter

 _LOGGER = logging.getLogger()

-class ArgsParser(ArgumentParser):
-    def __init__(self):
-        super(ArgsParser, self).__init__(
-            formatter_class=RawDescriptionHelpFormatter)
-        self.add_argument("-c", "--config", help="configuration file to use")
-        self.add_argument(
-            "-o", "--opt", nargs='+', help="set configuration options")
-
-    def parse_args(self, argv=None):
-        args = super(ArgsParser, self).parse_args(argv)
-        assert args.config is not None, \
-            "Please specify --config=configure_file_path."
-        args.conf_dict = self._parse_opt(args.opt, args.config)
-        return args
-
-    def _parse_helper(self, v):
-        if v.isnumeric():
-            if "." in v:
-                v = float(v)
-            else:
-                v = int(v)
-        elif v == "True" or v == "False":
-            v = (v == "True")
-        return v
-
-    def _parse_opt(self, opts, conf_path):
-        f = open(conf_path)
-        config = yaml.load(f, Loader=yaml.Loader)
-        if not opts:
-            return config
-        for s in opts:
-            s = s.strip()
-            k, v = s.split('=')
-            v = self._parse_helper(v)
-            print(k, v, type(v))
-            cur = config
-            parent = cur
-            for kk in k.split("."):
-                if kk not in cur:
-                    cur[kk] = {}
-                    parent = cur
-                    cur = cur[kk]
-                else:
-                    parent = cur
-                    cur = cur[kk]
-            parent[k.split(".")[-1]] = v
-        return config
-

 class DetOp(Op):
     def init_op(self):
...
deploy/pdserving/web_service_rec.py
@@ -18,10 +18,9 @@ import numpy as np
 import cv2
 import base64
 # from paddle_serving_app.reader import OCRReader
-from ocr_reader import OCRReader, DetResizeForTest
+from ocr_reader import OCRReader, DetResizeForTest, ArgsParser
 from paddle_serving_app.reader import Sequential, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
-from web_service_det import ArgsParser

 _LOGGER = logging.getLogger()
...
@@ -79,10 +78,11 @@ class RecOp(Op):

 class OcrService(WebService):
     def get_pipeline_response(self, read_op):
         rec_op = RecOp(name="rec", input_ops=[read_op])
+        print("rec op:", rec_op)
         return rec_op


 uci_service = OcrService(name="ocr")
 FLAGS = ArgsParser().parse_args()
 uci_service.prepare_pipeline_config(yml_dict=FLAGS.conf_dict)
 uci_service.run_service()
\ No newline at end of file
test_tipc/configs/ch_ppocr_mobile_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -8,8 +8,8 @@ trans_model:-m paddle_serving_client.convert
 --serving_server:./deploy/pdserving/ppocr_det_mobile_2.0_serving/
 --serving_client:./deploy/pdserving/ppocr_det_mobile_2.0_client/
 serving_dir:./deploy/pdserving
-web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
+web_service:web_service_det.py --config=config.yml --opt op.det.concurrency="1"
-op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.devices:"0"|null
 op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
...
test_tipc/configs/ch_ppocr_mobile_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_rec_mobile_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
-op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.devices:"0"|null
 op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
...
test_tipc/configs/ch_ppocr_server_v2.0_det/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
@@ -9,10 +9,10 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_det_server_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_det.py --config=config.yml --opt op.det.concurrency=1
-op.det.local_service_conf.devices:null|0
+op.det.local_service_conf.devices:"0"|null
 op.det.local_service_conf.use_mkldnn:True|False
 op.det.local_service_conf.thread_num:1|6
 op.det.local_service_conf.use_trt:False|True
 op.det.local_service_conf.precision:fp32|fp16|int8
 pipline:pipeline_rpc_client.py|pipeline_http_client.py
---image_dir:../../doc/imgs_words_en
+--image_dir:../../doc/imgs
\ No newline at end of file
test_tipc/configs/ch_ppocr_server_v2.0_rec/model_linux_gpu_normal_normal_serving_python_linux_gpu_cpu.txt
 ===========================serving_params===========================
 model_name:ocr_rec_server
-python:python3.7
+python:python3.7|cpp
 trans_model:-m paddle_serving_client.convert
 --dirname:./inference/ch_ppocr_server_v2.0_rec_infer/
 --model_filename:inference.pdmodel
...
@@ -9,7 +9,7 @@ trans_model:-m paddle_serving_client.convert
 --serving_client:./deploy/pdserving/ppocr_rec_server_2.0_client/
 serving_dir:./deploy/pdserving
 web_service:web_service_rec.py --config=config.yml --opt op.rec.concurrency=1
-op.rec.local_service_conf.devices:null|0
+op.rec.local_service_conf.devices:"0"|null
 op.rec.local_service_conf.use_mkldnn:True|False
 op.rec.local_service_conf.thread_num:1|6
 op.rec.local_service_conf.use_trt:False|True
...
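In these TIPC config files a value of the form a|b enumerates the candidate settings that the test scripts iterate over (prepare.sh and test_serving.sh below split such values with IFS='|'), so op.det/op.rec local_service_conf.devices:"0"|null now tries GPU 0 first and then the CPU ("null") configuration. A rough Python illustration of that reading of the format (an assumption for illustration, not repository code):

# Illustrative only: mirrors how the bash test scripts expand "|"-separated
# candidate values (IFS='|') from a TIPC config line.
line = 'op.rec.local_service_conf.devices:"0"|null'
key, _, value = line.partition(":")
for devices in value.split("|"):       # ['"0"', 'null']
    use_gpu = devices != "null"        # "null" selects the CPU run
    print(key, "->", devices, "(GPU)" if use_gpu else "(CPU)")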
test_tipc/prepare.sh
@@ -308,10 +308,9 @@ if [ ${MODE} = "serving_infer" ];then
 IFS='|'
 array=(${python_name_list})
 python_name=${array[0]}
-wget -nc https://paddle-serving.bj.bcebos.com/chain/paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
-${python_name} -m pip install install paddle_serving_server_gpu-0.0.0.post101-py3-none-any.whl
-${python_name} -m pip install paddle_serving_client==0.6.1
-${python_name} -m pip install paddle-serving-app==0.6.3
+${python_name} -m pip install paddle-serving-server-gpu==0.8.3.post101
+${python_name} -m pip install paddle_serving_client==0.8.3
+${python_name} -m pip install paddle-serving-app==0.8.3
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar
 wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar
...
test_tipc/test_serving.sh
@@ -62,25 +62,30 @@ function func_serving(){
     unset https_proxy
     unset http_proxy
     for python in ${python_list[*]}; do
+        echo ${python}
         if [ ${python} = "cpp" ]; then
            for use_gpu in ${web_use_gpu_list[*]}; do
                if [ ${use_gpu} = "null" ]; then
-                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
-                    eval $web_service_cmd
+                    web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293"
+                    eval $web_service_cpp_cmd
+                    last_status=${PIPESTATUS[0]}
+                    status_check $last_status "${web_service_cpp_cmd}" "${status_log}"
                    sleep 2s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
-                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
                else
-                    web_service_cpp_cmd="${python} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
-                    eval $web_service_cmd
+                    web_service_cpp_cmd="${python_list[0]} -m paddle_serving_server.serve --model ppocr_det_mobile_2.0_serving/ ppocr_rec_mobile_2.0_serving/ --port 9293 --gpu_id=0"
+                    eval $web_service_cpp_cmd
                    sleep 2s
                    _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
-                    pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
+                    pipeline_cmd="${python_list[0]} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
                    eval $pipeline_cmd
+                    last_status=${PIPESTATUS[0]}
                    status_check $last_status "${pipeline_cmd}" "${status_log}"
                    sleep 2s
                    ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
...
@@ -88,14 +93,17 @@ function func_serving(){
            done
        else
            # python serving
+            echo ${web_use_gpu_list[*]}
            for use_gpu in ${web_use_gpu_list[*]}; do
                echo ${ues_gpu}
                if [ ${use_gpu} = "null" ]; then
                    for use_mkldnn in ${web_use_mkldnn_list[*]}; do
                        for threads in ${web_cpu_threads_list[*]}; do
                            set_cpu_threads=$(func_set_params "${web_cpu_threads_key}" "${threads}")
-                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
+                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}="" ${web_use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} &"
                            eval $web_service_cmd
+                            last_status=${PIPESTATUS[0]}
+                            status_check $last_status "${web_service_cmd}" "${status_log}"
                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
                                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
...
@@ -128,6 +136,8 @@ function func_serving(){
                            set_precision=$(func_set_params "${web_precision_key}" "${precision}")
                            web_service_cmd="${python} ${web_service_py} ${web_use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} & "
                            eval $web_service_cmd
+                            last_status=${PIPESTATUS[0]}
+                            status_check $last_status "${web_service_cmd}" "${status_log}"
                            sleep 2s
                            for pipeline in ${pipeline_py[*]}; do
...
@@ -151,15 +161,15 @@ function func_serving(){
 }

-# set cuda device
+#set cuda device
 GPUID=$2
 if [ ${#GPUID} -le 0 ];then
-    env=" "
+    env="export CUDA_VISIBLE_DEVICES=0"
 else
     env="export CUDA_VISIBLE_DEVICES=${GPUID}"
 fi
-set CUDA_VISIBLE_DEVICES
 eval $env
+echo $env

 echo "################### run test ###################"
...
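The recurring change in this script is to capture ${PIPESTATUS[0]} immediately after each eval and pass it to status_check together with the command string and the status log. As a rough analogue of that run-then-record pattern (illustration only, not code from this commit; status_check, the client script name, and the log path are stand-ins):

# Illustration of the run-and-record pattern added above, not repository code.
import subprocess

def status_check(last_status, command, status_log):
    # Append a pass/fail line for `command` to the status log.
    result = "successfully" if last_status == 0 else "failed"
    with open(status_log, "a") as f:
        f.write(f"Run {result} with command - {command}!\n")

cmd = ["python3", "pipeline_http_client.py", "--image_dir", "../../doc/imgs"]
last_status = subprocess.run(cmd).returncode   # analogous to ${PIPESTATUS[0]}
status_check(last_status, " ".join(cmd), "results_serving.log")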