PaddlePaddle / PaddleClas

Commit 8a6acfbd (unverified)
Authored on Jun 13, 2022 by Wei Shengyu; committed by GitHub on Jun 13, 2022
Merge pull request #2006 from HydrogenSulfate/fix_paddle2onnx_tipc
Fix paddle2onnx tipc
Parents: 810588ce, 55a066ed

Showing 16 changed files with 48 additions and 50 deletions (+48 −50):
test_tipc/benchmark_train.sh (+2 −2)
test_tipc/common_func.sh (+4 −3)
test_tipc/config/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+1 −1)
test_tipc/config/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+1 −0)
test_tipc/config/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+1 −0)
test_tipc/config/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+1 −0)
test_tipc/config/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+5 −5)
test_tipc/config/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+5 −5)
test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt (+2 −2)
test_tipc/prepare.sh (+2 −2)
test_tipc/test_inference_cpp.sh (+4 −4)
test_tipc/test_inference_jeston.sh (+1 −1)
test_tipc/test_lite_arm_cpu_cpp.sh (+1 −1)
test_tipc/test_paddle2onnx.sh (+4 −10)
test_tipc/test_serving_infer.sh (+8 −8)
test_tipc/test_train_inference_python.sh (+6 −6)

test_tipc/benchmark_train.sh

@@ -225,7 +225,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         else
             IFS=";"
             unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -261,7 +261,7 @@ for batch_size in ${batch_size_list[*]}; do
             echo $cmd
             eval $cmd
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${cmd}" "${status_log}"
+            status_check $last_status "${cmd}" "${status_log}" "${model_name}"
         fi
     done
 done

test_tipc/common_func.sh

@@ -38,6 +38,7 @@ function func_set_params(){
 function func_parser_params(){
     strs=$1
+    MODE=$2
     IFS=":"
     array=(${strs})
     key=${array[0]}
@@ -64,10 +65,10 @@ function status_check(){
     last_status=$1   # the exit code
     run_command=$2
     run_log=$3
+    model_name=$4
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
     fi
 }
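
The net effect is that status_check now takes the model name as a fourth argument and stamps it into every success/failure log line; the remaining files in this commit update the call sites accordingly. A minimal sketch of the new call pattern (the cmd value here is illustrative; model_name and status_log mirror the test_paddle2onnx.sh values shown later in this diff):

# Sketch only: "echo hello" stands in for a real test command.
source test_tipc/common_func.sh

model_name="ResNet50_vd"
status_log="./test_tipc/output/${model_name}/results_paddle2onnx.log"
mkdir -p "$(dirname "${status_log}")"

cmd="echo hello"
eval $cmd
last_status=${PIPESTATUS[0]}
# Fourth argument tags the log line, e.g.
# " Run successfully with command - ResNet50_vd - echo hello! "
status_check $last_status "${cmd}" "${status_log}" "${model_name}"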

test_tipc/config/PP-ShiTu/PPShiTu_mainbody_det_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

@@ -6,7 +6,7 @@ python:python3.7
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
 --save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
---opset_version:10
+--opset_version:11
 --enable_onnx_checker:True
 inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
 inference:./python/predict_cls.py
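
For reference, test_tipc/test_paddle2onnx.sh stitches these key:value pairs into a paddle2onnx invocation; hand-expanded, the conversion command after this change is roughly:

paddle2onnx \
    --model_dir ./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \
    --model_filename inference.pdmodel \
    --params_filename inference.pdiparams \
    --save_file ./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx \
    --opset_version 11 \
    --enable_onnx_checker True

The bump from opset 10 to 11 suggests the PicoDet detection export uses an operator that ONNX opset 10 lacks.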

test_tipc/config/PPHGNet/PPHGNet_small_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPHGNet_small_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPHGNet_small_infer

test_tipc/config/PPHGNet/PPHGNet_tiny_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPHGNet_tiny_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPHGNet_tiny_infer

test_tipc/config/PPLCNet/PPLCNet_x0_25_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPLCNet_x0_25_infer

test_tipc/config/PPLCNet/PPLCNet_x0_35_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

 ===========================paddle2onnx_params===========================
-model_name:PPLCNet_x0_25
+model_name:PPLCNet_x0_35
 python:python3.7
 2onnx: paddle2onnx
---model_dir:./deploy/models/PPLCNet_x0_25_infer/
+--model_dir:./deploy/models/PPLCNet_x0_35_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
+--save_file:./deploy/models/PPLCNet_x0_35_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
-inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
-Global.inference_model_dir:./models/PPLCNet_x0_25_infer
+Global.inference_model_dir:./models/PPLCNet_x0_35_infer
 Global.use_gpu:False
 -c:configs/inference_cls.yaml
\ No newline at end of file

test_tipc/config/PPLCNet/PPLCNet_x0_5_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

 ===========================paddle2onnx_params===========================
-model_name:PP-ShiTu_mainbody_det
+model_name:PPLCNet_x0_5
 python:python3.7
 2onnx: paddle2onnx
---model_dir:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
+--model_dir:./deploy/models/PPLCNet_x0_5_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
+--save_file:./deploy/models/PPLCNet_x0_5_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
-inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
-Global.inference_model_dir:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer
+Global.inference_model_dir:./models/PPLCNet_x0_5_infer
 Global.use_gpu:False
 -c:configs/inference_cls.yaml
\ No newline at end of file

test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt

@@ -9,8 +9,8 @@ python:python3.7
 --opset_version:10
 --enable_onnx_checker:True
 inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
-inference:python/predict_cls.py -c configs/inference_cls.yaml
+inference:./python/predict_cls.py
 Global.use_onnx:True
-Global.inference_model_dir:models/ResNet50_vd_infer/
+Global.inference_model_dir:./models/ResNet50_vd_infer/
 Global.use_gpu:False
 -c:configs/inference_cls.yaml

test_tipc/prepare.sh

@@ -200,7 +200,7 @@ fi
 if [[ ${MODE} = "serving_infer" ]]; then
     # prepare serving env
     python_name=$(func_parser_value "${lines[2]}")
-    ${python_name} -m pip install install paddle-serving-server-gpu==0.7.0.post102
+    ${python_name} -m pip install paddle-serving-server-gpu==0.7.0.post102
     ${python_name} -m pip install paddle_serving_client==0.7.0
     ${python_name} -m pip install paddle-serving-app==0.7.0
     if [[ ${model_name} =~ "ShiTu" ]]; then
@@ -231,7 +231,7 @@ if [[ ${MODE} = "paddle2onnx_infer" ]]; then
     inference_model_url=$(func_parser_value "${lines[10]}")
     tar_name=${inference_model_url##*/}
-    ${python_name} -m pip install install paddle2onnx
+    ${python_name} -m pip install paddle2onnx
     ${python_name} -m pip install onnxruntime
     cd deploy
     mkdir models

test_tipc/test_inference_cpp.sh

@@ -63,7 +63,7 @@ function func_shitu_cpp_inference(){
                     command="${_script} > ${_save_log_path} 2>&1"
                     eval $command
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command}" "${status_log}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -87,7 +87,7 @@ function func_shitu_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
     done
@@ -125,7 +125,7 @@ function func_cls_cpp_inference(){
                     command1="${_script} > ${_save_log_path} 2>&1"
                     eval ${command1}
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command1}" "${status_log}"
+                    status_check $last_status "${command1}" "${status_log}" "${model_name}"
                 done
             done
         done
@@ -148,7 +148,7 @@ function func_cls_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}"
             done
         done
    done

test_tipc/test_inference_jeston.sh

@@ -71,7 +71,7 @@ if [ ${MODE} = "whole_infer" ]; then
         echo $export_cmd
         eval $export_cmd
         status_export=$?
-        status_check $status_export "${export_cmd}" "${status_log}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
     else
         save_infer_dir=${infer_model}
     fi

test_tipc/test_lite_arm_cpu_cpp.sh

@@ -67,7 +67,7 @@ function func_test_tipc(){
             eval ${command1}
             command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
             eval ${command2}
-            status_check $? "${command2}" "${status_log}"
+            status_check $? "${command2}" "${status_log}" "${model_name}"
         done
     done
 done

test_tipc/test_paddle2onnx.sh

@@ -3,13 +3,6 @@ source test_tipc/common_func.sh
 FILENAME=$1
-dataline=$(cat ${FILENAME})
-lines=(${dataline})
-# common params
-model_name=$(func_parser_value "${lines[1]}")
-python=$(func_parser_value "${lines[2]}")
 # parser params
 dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
 IFS=$'\n'
@@ -43,7 +36,7 @@ inference_config_key=$(func_parser_key "${lines[15]}")
 inference_config_value=$(func_parser_value "${lines[15]}")
 
 LOG_PATH="./test_tipc/output/${model_name}"
-mkdir -p ./test_tipc/output
+mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_paddle2onnx.log"
@@ -62,7 +55,8 @@ function func_paddle2onnx(){
     trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
     eval $trans_model_cmd
     last_status=${PIPESTATUS[0]}
-    status_check $last_status "${trans_model_cmd}" "${status_log}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
     # python inference
     set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
     set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
@@ -70,7 +64,7 @@ function func_paddle2onnx(){
     set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
     infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
     eval $infer_model_cmd
-    status_check $last_status "${infer_model_cmd}" "${status_log}"
+    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
 }
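
Since LOG_PATH is now keyed by model name, each model writes its own results file instead of sharing ./test_tipc/output. Assuming the usual two-step TIPC flow (prepare, then test), a run against the ResNet50_vd config above would be checked like this:

bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt paddle2onnx_infer
bash test_tipc/test_paddle2onnx.sh test_tipc/config/ResNet/ResNet50_vd_linux_gpu_normal_normal_paddle2onnx_python_linux_cpu.txt
cat ./test_tipc/output/ResNet50_vd/results_paddle2onnx.log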

test_tipc/test_serving_infer.sh

@@ -88,7 +88,7 @@ function func_serving_cls(){
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
             pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
             eval $pipeline_cmd
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
         else
@@ -98,7 +98,7 @@ function func_serving_cls(){
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
             pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
             eval $pipeline_cmd
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
         fi
@@ -124,7 +124,7 @@ function func_serving_cls(){
         eval $pipeline_cmd
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -156,7 +156,7 @@ function func_serving_cls(){
         eval $pipeline_cmd
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -250,7 +250,7 @@ function func_serving_rec(){
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
             pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
             eval $pipeline_cmd
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
         else
@@ -260,7 +260,7 @@ function func_serving_rec(){
             _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
             pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
             eval $pipeline_cmd
-            status_check $last_status "${pipeline_cmd}" "${status_log}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
             sleep 5s
             ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
         fi
@@ -286,7 +286,7 @@ function func_serving_rec(){
         eval $pipeline_cmd
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 5s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -318,7 +318,7 @@ function func_serving_rec(){
         eval $pipeline_cmd
         last_status=${PIPESTATUS[0]}
         eval "cat ${_save_log_path}"
-        status_check $last_status "${pipeline_cmd}" "${status_log}"
+        status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
         sleep 10s
     done
     ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9

test_tipc/test_train_inference_python.sh

@@ -126,7 +126,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}"
+                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
                 done
             done
         done
@@ -151,7 +151,7 @@ function func_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "../${status_log}"
+                status_check $last_status "${command}" "../${status_log}" "${model_name}"
             done
         done
    done
@@ -198,7 +198,7 @@ elif [[ ${MODE} = "klquant_whole_infer" ]]; then
    command="${python} ${kl_quant_cmd_value}"
    eval $command
    last_status=${PIPESTATUS[0]}
-    status_check $last_status "${command}" "${status_log}"
+    status_check $last_status "${command}" "${status_log}" "${model_name}"
    cd inference/quant_post_static_model
    ln -s __model__ inference.pdmodel
    ln -s __params__ inference.pdiparams
@@ -301,7 +301,7 @@ else
        # export FLAGS_cudnn_deterministic=True
        sleep 5
        eval $cmd
-        status_check $? "${cmd}" "${status_log}"
+        status_check $? "${cmd}" "${status_log}" "${model_name}"
        sleep 5
        if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -318,7 +318,7 @@ else
            set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
            eval $eval_cmd
-            status_check $? "${eval_cmd}" "${status_log}"
+            status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
            sleep 5
        fi
        # run export model
@@ -333,7 +333,7 @@ else
            set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
            export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
            eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
            #run inference
            eval $env