PaddlePaddle / PaddleClas · commit
f27a9cf2
Authored Jun 12, 2022 by HydrogenSulfate
add cpp serving infer(except PPShiTu)
Parent: ac821bbe
Showing 3 changed files with 52 additions and 36 deletions (+52 -36).
deploy/paddleserving/preprocess/serving_client_conf.prototxt  (+14 -0)
test_tipc/prepare.sh  (+5 -4)
test_tipc/test_serving_infer.sh  (+33 -32)
deploy/paddleserving/preprocess/serving_client_conf.prototxt (new file, mode 100644)
feed_var {
    name: "inputs"
    alias_name: "inputs"
    is_lod_tensor: false
    feed_type: 20
    shape: 1
}
fetch_var {
    name: "save_infer_model/scale_0.tmp_1"
    alias_name: "prediction"
    is_lod_tensor: false
    fetch_type: 1
    shape: 1000
}
\ No newline at end of file
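
For orientation (an inference from how test_tipc/test_serving_infer.sh patches this file, not something the diff states): feed_type 20 appears to declare the input as a jpeg-encoded string, which is why shape is 1 rather than a CHW image shape. A minimal smoke test of this config, with a hypothetical model directory name, might look like:

    # Hedged sketch; ResNet50_vd_serving and python3.7 are assumptions.
    # --op GeneralClasOp requires the custom server built by build_server.sh.
    cd deploy/paddleserving
    python3.7 -m paddle_serving_server.serve \
        --model ResNet50_vd_serving --op GeneralClasOp --port 9292 &
    sleep 5s
    python3.7 test_cpp_serving_client.py
    python3.7 -m paddle_serving_server.serve stop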
test_tipc/prepare.sh
@@ -199,15 +199,16 @@ fi
 if [[ ${MODE} = "serving_infer" ]]; then
     # prepare serving env
-    ${python_name} -m pip install paddle_serving_client==0.9.0
-    ${python_name} -m pip install paddle-serving-app==0.9.0
-    python_name=$(func_parser_value "${lines[2]}")
+    ${python_name} -m pip install paddle_serving_client==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    ${python_name} -m pip install paddle-serving-app==0.9.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
+    python_name=$(func_parser_value "${lines[2]}")
     if [[ ${FILENAME} =~ "cpp" ]]; then
         pushd ./deploy/paddleserving
         bash build_server.sh
+        # bash build_server.sh ${python_name}
         popd
     else
-        ${python_name} -m pip install install paddle-serving-server-gpu==0.9.0.post102
+        ${python_name} -m pip install install paddle-serving-server-gpu==0.9.0.post101 -i https://pypi.tuna.tsinghua.edu.cn/simple
     fi
     if [[ ${model_name} =~ "ShiTu" ]]; then
         cls_inference_model_url=$(func_parser_value "${lines[3]}")
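
As a usage sketch (the config filename below is illustrative, not part of this commit): prepare.sh takes the TIPC config as ${FILENAME} and the mode as its second argument, and the branch above selects the C++ server build whenever the filename contains "cpp".

    # Hypothetical TIPC config path; any serving config whose name
    # contains "cpp" routes through build_server.sh above.
    CONFIG=test_tipc/configs/ResNet50/ResNet50_linux_gpu_normal_normal_serving_cpp_linux_gpu_cpu.txt
    bash test_tipc/prepare.sh ${CONFIG} serving_infer
    bash test_tipc/test_serving_infer.sh ${CONFIG}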
test_tipc/test_serving_infer.sh
@@ -38,8 +38,9 @@ pipeline_py=$(func_parser_value "${lines[13]}")
 function func_serving_cls(){
-    LOG_PATH="../../test_tipc/output/${model_name}"
+    LOG_PATH="test_tipc/output/${model_name}"
     mkdir -p ${LOG_PATH}
+    LOG_PATH="../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
     IFS='|'
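
The two-step LOG_PATH assignment is deliberate: mkdir runs from the repository root, but the serving commands later execute from deploy/paddleserving, two levels down. A sketch of the intent:

    # Create the log dir relative to the repo root ...
    LOG_PATH="test_tipc/output/${model_name}"
    mkdir -p ${LOG_PATH}
    # ... then re-express it relative to deploy/paddleserving,
    # where the server and client are actually launched.
    LOG_PATH="../../${LOG_PATH}"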
@@ -54,6 +55,7 @@ function func_serving_cls(){
     if [[ ${python_} =~ "python" ]]; then
         trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
         eval $trans_model_cmd
+        # echo $PWD - $trans_model_cmd
         break
     fi
 done
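
trans_model_py typically resolves to Paddle Serving's model conversion entry point, so the command assembled above expands to something like this (directory and file names are illustrative):

    # Hedged expansion of ${trans_model_cmd}; paths are examples only.
    python3.7 -m paddle_serving_client.convert \
        --dirname ./ResNet50_vd_infer \
        --model_filename inference.pdmodel \
        --params_filename inference.pdiparams \
        --serving_server ./ResNet50_vd_serving \
        --serving_client ./ResNet50_vd_client

The conversion writes the paired serving_server/serving_client directories whose *_conf.prototxt files the rest of this script patches.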
@@ -61,6 +63,7 @@ function func_serving_cls(){
     # modify the alias_name of fetch_var to "outputs"
     server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_server_value}/serving_server_conf.prototxt"
     eval ${server_fetch_var_line_cmd}
     client_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"prediction\"/' ${serving_client_value}/serving_client_conf.prototxt"
     eval ${client_fetch_var_line_cmd}
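
The sed address pair /fetch_var/,/is_lod_tensor/ confines the substitution to the fetch_var block, so the feed_var alias survives. A self-contained check against the client config committed above:

    # Demo on a scratch copy of the committed client config.
    cp deploy/paddleserving/preprocess/serving_client_conf.prototxt /tmp/conf.prototxt
    sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: "prediction"/' /tmp/conf.prototxt
    grep alias_name /tmp/conf.prototxt
    # alias_name: "inputs"       (feed_var, untouched)
    # alias_name: "prediction"   (fetch_var, rewritten)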
@@ -81,44 +84,51 @@ function func_serving_cls(){
             break
         fi
     done
-    set_pipeline_py_feed_var_cmd="sed -i '/feed/,/img}/s/{.*: img}/{${feed_var_name}: img}/' ${pipeline_py}"
-    echo $PWD - $set_pipeline_py_feed_var_cmd
-    eval ${set_pipeline_py_feed_var_cmd}
+    set_client_feed_type_cmd="sed -i '/feed_type/,/: .*/s/feed_type: .*/feed_type: 20/' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${set_client_feed_type_cmd}
+    set_client_shape_cmd="sed -i '/shape: 3/,/shape: 3/s/shape: 3/shape: 1/' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${set_client_shape_cmd}
+    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${set_client_shape224_cmd}
+    set_client_shape224_cmd="sed -i '/shape: 224/,/shape: 224/s/shape: 224//' ${serving_client_value}/serving_client_conf.prototxt"
+    eval ${set_client_shape224_cmd}
+    serving_client_dir_name=$(func_get_url_file_name "$serving_client_value")
+    set_pipeline_load_config_cmd="sed -i '/load_client_config/,/.prototxt/s/.\/.*\/serving_client_conf.prototxt/.\/${serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
+    eval ${set_pipeline_load_config_cmd}
+    set_pipeline_feed_var_cmd="sed -i '/feed=/,/: image}/s/feed={.*: image}/feed={${feed_var_name}: image}/' ${pipeline_py}"
+    eval ${set_pipeline_feed_var_cmd}
-    serving_server_dir_name=$(func_get_url_file_name "$serving_server_value")
-    set_client_config_line_cmd="sed -i '/client/,/serving_server_conf.prototxt/s/.\/.*\/serving_server_conf.prototxt/.\/${serving_server_dir_name}\/serving_server_conf.prototxt/' ${pipeline_py}"
-    echo $PWD - $set_client_config_line_cmd
-    eval ${set_client_config_line_cmd}
     for use_gpu in ${web_use_gpu_list[*]}; do
-        if [ ${use_gpu} = "null" ]; then
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --port 9292 &"
-            echo $PWD - $web_service_cpp_cmd
+        if [[ ${use_gpu} = "null" ]]; then
+            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
             eval ${web_service_cpp_cmd}
             sleep 5s
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             echo ${pipeline_cmd}
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
             status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
             ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9
+            eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         else
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --port 9292 --gpu_id=${use_gpu} &"
-            echo $PWD - $web_service_cpp_cmd
+            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
             eval ${web_service_cpp_cmd}
-            sleep 5s
+            sleep 8s
             _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             echo $PWD - $pipeline_cmd
             eval $pipeline_cmd
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
             status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
             ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9
             sleep 5s
+            eval "${python_} -m paddle_serving_server.serve stop"
         fi
     done
 else
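
Stripped of the TIPC plumbing, the C++ serving cycle this hunk adds boils down to the following (model directory and interpreter are illustrative):

    # Distilled start -> infer -> check -> teardown cycle; values illustrative.
    python3.7 -m paddle_serving_server.serve \
        --model ResNet50_vd_serving --op GeneralClasOp --port 9292 &
    sleep 8s                                    # give the server time to load
    python3.7 test_cpp_serving_client.py > infer.log 2>&1
    echo "client exit status: $?"
    ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9
    python3.7 -m paddle_serving_server.serve stop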
@@ -154,7 +164,7 @@ function func_serving_cls(){
         status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     eval "${python_} -m paddle_serving_server.serve stop"
 elif [ ${use_gpu} -eq 0 ]; then
     if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
         continue
@@ -186,7 +196,7 @@ function func_serving_cls(){
         status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     eval "${python_} -m paddle_serving_server.serve stop"
 else
     echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
 fi
@@ -270,16 +280,13 @@ function func_serving_rec(){
     if [[ ${FILENAME} =~ "cpp" ]]; then
         det_serving_client_dir_name=$(func_get_url_file_name "$det_serving_client_value")
         set_det_client_config_line_cmd="sed -i '/MainbodyDetect/,/serving_client_conf.prototxt/s/models\/.*\/serving_client_conf.prototxt/models\/${det_serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
-        echo $PWD - $set_det_client_config_line_cmd
         eval ${set_det_client_config_line_cmd}
         cls_serving_client_dir_name=$(func_get_url_file_name "$cls_serving_client_value")
         set_cls_client_config_line_cmd="sed -i '/ObjectRecognition/,/serving_client_conf.prototxt/s/models\/.*\/serving_client_conf.prototxt/models\/${cls_serving_client_dir_name}\/serving_client_conf.prototxt/' ${pipeline_py}"
-        echo $PWD - $set_cls_client_config_line_cmd
         eval ${set_cls_client_config_line_cmd}
         set_pipeline_py_feed_var_cmd="sed -i '/ObjectRecognition/,/feed={\"x\": batch_imgs}/s/{.*: batch_imgs}/{${feed_var_name}: batch_imgs}/' ${pipeline_py}"
-        echo $PWD - $set_pipeline_py_feed_var_cmd
         eval ${set_pipeline_py_feed_var_cmd}
         for use_gpu in ${web_use_gpu_list[*]}; do
@@ -290,19 +297,16 @@ function func_serving_rec(){
         cls_serving_server_dir_name=$(func_get_url_file_name "$cls_serving_server_value")
         web_service_cpp_cmd2="${python_interp} -m paddle_serving_server.serve --model ../../models/${cls_serving_server_dir_name} --port 9294 >>log_feature_extraction.txt &"
-        echo $PWD - $web_service_cpp_cmd
         eval $web_service_cpp_cmd
-        echo $PWD - $web_service_cpp_cmd2
         eval $web_service_cpp_cmd2
         sleep 5s
         _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
         pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
         echo ${pipeline_cmd}
         eval ${pipeline_cmd}
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
         status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
         ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9
+        eval "${python_} -m paddle_serving_server.serve stop"
         sleep 5s
     else
         det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
@@ -310,19 +314,16 @@ function func_serving_rec(){
         cls_serving_server_dir_name=$(func_get_url_file_name "$cls_serving_server_value")
         web_service_cpp_cmd2="${python_interp} -m paddle_serving_server.serve --model ../../models/${cls_serving_server_dir_name} --port 9294 --gpu_id=${use_gpu} >>log_feature_extraction.txt &"
-        echo $PWD - $web_service_cpp_cmd
         eval $web_service_cpp_cmd
-        echo $PWD - $web_service_cpp_cmd2
         eval $web_service_cpp_cmd2
         sleep 5s
         _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
         pipeline_cmd="${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
         echo ${pipeline_cmd}
         eval ${pipeline_cmd}
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
         status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
         ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9
+        eval "${python_} -m paddle_serving_server.serve stop"
         sleep 5s
     fi
 done
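
Condensed, the recognition branch runs two C++ servers before one client drives the whole pipeline; the detection server command (web_service_cpp_cmd) is defined in context elided from this diff, so it is only referenced below, not reconstructed:

    # Condensed shape of the rec test cycle above.
    eval ${web_service_cpp_cmd}     # mainbody detection server (defined in elided context)
    eval ${web_service_cpp_cmd2}    # feature extraction server on port 9294
    sleep 5s
    eval "${python_interp} test_cpp_serving_client.py > ${_save_log_path} 2>&1"
    ps ux | grep -E 'serving_server|pipeline' | awk '{print $2}' | xargs kill -s 9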
@@ -353,7 +354,7 @@ function func_serving_rec(){
         status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     eval "${python_} -m paddle_serving_server.serve stop"
 elif [ ${use_gpu} -eq 0 ]; then
     if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then
         continue
@@ -385,7 +386,7 @@ function func_serving_rec(){
         status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 10s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
     eval "${python_} -m paddle_serving_server.serve stop"
 else
     echo "Does not support hardware [${use_gpu}] other than CPU and GPU Currently!"
 fi