PaddlePaddle / PaddleClas · Commit ec21470a
Commit ec21470a (unverified) · authored by cuicheng01 on Sep 10, 2022 · committed by GitHub on Sep 10, 2022
Merge pull request #2258 from TingquanGao/dev/tipc_fix_log_path
tipc: fix log path
Parents: 5819c2e5, 0cea5504
Showing 10 changed files with 149 additions and 107 deletions (+149 -107).
test_tipc/common_func.sh                  +3  -2
test_tipc/prepare.sh                      +2  -2
test_tipc/test_inference_cpp.sh           +6  -6
test_tipc/test_inference_jeston.sh        +3  -2
test_tipc/test_lite_arm_cpu_cpp.sh        +4  -4
test_tipc/test_paddle2onnx.sh             +9  -6
test_tipc/test_ptq_inference_python.sh    +5  -4
test_tipc/test_serving_infer_cpp.sh       +38 -24
test_tipc/test_serving_infer_python.sh    +41 -28
test_tipc/test_train_inference_python.sh  +38 -29
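All ten files apply the same fix pattern: anchor LOG_PATH to the repository root with $(pwd) instead of a relative ./test_tipc/... path, redirect each launched command's stdout/stderr into its own log file, and pass that file to status_check as a new fifth argument so the results_*.log records where the full output lives. A minimal, self-contained sketch of the pattern (run from the repo root; the echo command and the "demo_model" names are placeholders, not taken from the diff):

    #!/bin/bash
    source test_tipc/common_func.sh        # provides status_check (see the first file below)

    CLS_ROOT_PATH=$(pwd)                   # absolute repo root, survives any later cd
    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/demo_model/demo_mode"
    mkdir -p ${LOG_PATH}
    status_log="${LOG_PATH}/results_demo.log"

    run_log="${LOG_PATH}/run.log"
    command="echo hello > ${run_log} 2>&1" # placeholder for a real train/infer command
    eval ${command}
    last_status=${PIPESTATUS[0]}
    # fifth argument: the per-command log, echoed into ${status_log} next to the command
    status_check ${last_status} "${command}" "${status_log}" "demo_model" "${run_log}"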
test_tipc/common_func.sh

@@ -77,9 +77,10 @@ function status_check(){
     run_command=$2
     run_log=$3
     model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path}!  \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path}!  \033[0m" | tee -a ${run_log}
     fi
}
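The new fifth parameter is the whole interface change: callers name the per-command log and status_check prints it (and tee -a's it into ${run_log}) alongside the command. The function reads $last_status, so an assignment like last_status=$1 presumably sits just above this hunk, outside the shown context. Representative call sites from elsewhere in this PR:

    # before: four arguments; the reader had to guess where the command's output went
    status_check $last_status "${command}" "${status_log}" "${model_name}"
    # after: the fifth argument names the captured log
    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
    # callers with no redirected log pass an empty placeholder (test_inference_jeston.sh below)
    status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" ""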
test_tipc/prepare.sh

@@ -323,7 +323,7 @@ if [[ ${MODE} = "paddle2onnx_infer" ]]; then
     # prepare paddle2onnx env
     python_name=$(func_parser_value "${lines[2]}")
     inference_model_url=$(func_parser_value "${lines[10]}")
-    tar_name=${inference_model_url##*/}
+    tar_name=$(func_get_url_file_name "$inference_model_url")
     ${python_name} -m pip install onnx
     ${python_name} -m pip install paddle2onnx
test_tipc/test_inference_cpp.sh

@@ -37,7 +37,8 @@ cpp_benchmark_value=$(func_parser_value "${lines[16]}")
 generate_yaml_cmd=$(func_parser_value "${lines[17]}")
 transform_index_cmd=$(func_parser_value "${lines[18]}")
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_cpp.log"
 # generate_yaml_cmd="python3 test_tipc/generate_cpp_yaml.py"

@@ -70,7 +71,7 @@ function func_shitu_cpp_inference(){
                     command="${_script} > ${_save_log_path} 2>&1"
                     eval $command
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done

@@ -94,7 +95,7 @@ function func_shitu_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done

@@ -126,13 +127,12 @@ function func_cls_cpp_inference(){
                         precison="int8"
                     fi
                     _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                     command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
                     eval $command
                     command1="${_script} > ${_save_log_path} 2>&1"
                     eval ${command1}
                     last_status=${PIPESTATUS[0]}
-                    status_check $last_status "${command1}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command1}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done

@@ -155,7 +155,7 @@ function func_cls_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
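Anchoring LOG_PATH to $(pwd) matters because this script, like the others, cds into deploy/ and similar directories before running inference, after which a ./test_tipc/... path no longer resolves. A standalone demonstration of the failure mode, unrelated to PaddleClas itself:

    mkdir -p demo_root/deploy demo_root/test_tipc/output && cd demo_root
    REL_LOG="./test_tipc/output/run.log"
    ABS_LOG="$(pwd)/test_tipc/output/run.log"
    cd deploy
    echo ok > "${ABS_LOG}"                  # absolute path: works from anywhere
    ( echo ok > "${REL_LOG}" ) 2>/dev/null || echo "relative path broke after cd"
    cd ../..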
test_tipc/test_inference_jeston.sh

@@ -42,7 +42,8 @@ infer_key1=$(func_parser_key "${lines[17]}")
 infer_value1=$(func_parser_value "${lines[17]}")
-LOG_PATH="./test_tipc/output"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"

@@ -71,7 +72,7 @@ if [ ${MODE} = "whole_infer" ]; then
         echo $export_cmd
         eval $export_cmd
         status_export=$?
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" ""
     else
         save_infer_dir=${infer_model}
     fi
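The export step here has no redirected log of its own, so the call passes "" to keep status_check's five positional arguments aligned; the empty value simply shows up as a blank field in the results line. A sketch, assuming common_func.sh (above) is sourced and the command string is illustrative:

    last_status=0
    status_log="/tmp/results_python.log"     # illustrative path
    status_check $last_status "tools/export_model.py ..." "${status_log}" "some_model" ""
    # results line reads: Run successfully with command - some_model - tools/export_model.py ... - !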
test_tipc/test_lite_arm_cpu_cpp.sh

 #!/bin/bash
 source test_tipc/common_func.sh
-current_path=$PWD
 IFS=$'\n'

@@ -33,7 +32,8 @@ num_threads_list=$(func_parser_value_lite "${tipc_lines[5]}" ":")
 batch_size_list=$(func_parser_value_lite "${tipc_lines[6]}" ":")
 precision_list=$(func_parser_value_lite "${tipc_lines[7]}" ":")
-LOG_PATH=${current_path}"/output"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/output"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results.log"

@@ -67,7 +67,7 @@ function func_test_tipc(){
     eval ${command1}
     command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
     eval ${command2}
-    status_check $? "${command2}" "${status_log}" "${model_name}"
+    status_check $? "${command2}" "${status_log}" "${model_name}" "${_save_log_path}"
 done
 done
 done
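Note where the redirection sits: outside the single quotes, so the output the command produces on the device via adb shell is captured into a host-side log that status_check can report, while LD_LIBRARY_PATH is still exported on the device. The pattern in isolation (device paths and binary name are illustrative, not from the diff):

    _save_log_path="${LOG_PATH}/lite_arm_cpu_run.log"
    lite_arm_work_path="/data/local/tmp/arm_cpu"            # illustrative device dir
    real_inference_cmd="./clas_system config.txt img.jpg"   # illustrative Lite binary invocation
    command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
    eval ${command2}
    status_check $? "${command2}" "${status_log}" "${model_name}" "${_save_log_path}"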
test_tipc/test_paddle2onnx.sh

@@ -2,7 +2,7 @@
 source test_tipc/common_func.sh

 FILENAME=$1
-MODE=$2
+MODE="paddle2onnx_infer"

 # parser params
 dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)

@@ -36,7 +36,8 @@ inference_hardware_value=$(func_parser_value "${lines[14]}")
 inference_config_key=$(func_parser_key "${lines[15]}")
 inference_config_value=$(func_parser_value "${lines[15]}")
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_paddle2onnx.log"

@@ -46,27 +47,29 @@ function func_paddle2onnx(){
     _script=$1

     # paddle2onnx
-    _save_log_path=".${LOG_PATH}/paddle2onnx_infer_cpu.log"
     set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
     set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
     set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
-    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False"
+    trans_log="${LOG_PATH}/trans_model.log"
+    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False > ${trans_log} 2>&1"
     eval $trans_model_cmd
     last_status=${PIPESTATUS[0]}
-    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"

     # python inference
     if [[ ${inference_py} != "null" ]]; then
+        _save_log_path="${LOG_PATH}/paddle2onnx_infer_cpu.log"
         set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
         set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
         set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}")
         set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
         infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
         eval $infer_model_cmd
-        status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
     fi
}
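Two details here beyond the absolute LOG_PATH: MODE is pinned to "paddle2onnx_infer" rather than trusting $2, and the old _save_log_path=".${LOG_PATH}/..." carried a stray leading dot that would corrupt an absolute path, so the assignment was rebuilt (dot-free) inside the inference branch. Also, because the "> ${trans_log} 2>&1" redirection lives inside the eval'd string and involves no pipeline, ${PIPESTATUS[0]} is simply the command's own exit status, as a quick check shows:

    eval "false > /tmp/trans_model_demo.log 2>&1"
    echo "exit=${PIPESTATUS[0]}"    # prints exit=1, identical to $?, since no pipe is involved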
test_tipc/test_ptq_inference_python.sh

@@ -94,7 +94,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
     epoch_num=1
 fi
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"

@@ -123,7 +124,7 @@ function func_inference() {
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done

@@ -145,7 +146,7 @@ function func_inference() {
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}"
                 done
             done
         done

@@ -168,6 +169,6 @@ if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ];
         ln -s __params__ inference.pdiparams
         cd ../../deploy
         is_quant=True
-        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         cd ..
     fi
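With LOG_PATH absolute, the "../" prefixes that previously compensated for "cd ../../deploy" are dropped from both the status_check calls and the func_inference argument: the same string now resolves identically from any working directory. A short sketch of the invariant (names illustrative):

    LOG_PATH="$(pwd)/test_tipc/output/demo_model/whole_infer"
    mkdir -p "${LOG_PATH}" deploy && cd deploy
    echo "still valid from deploy/: ${LOG_PATH}"    # no ../ bookkeeping needed
    cd ..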
test_tipc/test_serving_infer_cpp.sh

@@ -38,10 +38,10 @@ pipeline_py=$(func_parser_value "${lines[13]}")

 function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/serving_infer"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
+    status_log="${LOG_PATH}/results_cpp_serving.log"
     IFS='|'

     # pdserving

@@ -53,8 +53,11 @@ function func_serving_cls(){
     for python_ in ${python[*]}; do
         if [[ ${python_} =~ "python" ]]; then
-            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            trans_log="${LOG_PATH}/cpp_trans_model.log"
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_log} 2>&1"
             eval ${trans_model_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
             break
         fi
     done

@@ -102,32 +105,34 @@ function func_serving_cls(){
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [[ ${use_gpu} = "null" ]]; then
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
+            server_log_path="${LOG_PATH}/cpp_server_cpu.log"
+            web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 > ${server_log_path} 2>&1 &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         else
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
+            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
+            web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} > ${server_log_path} 2>&1 &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 8s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_gpu.log"
            pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             sleep 5s
             eval "${python_} -m paddle_serving_server.serve stop"
         fi

@@ -136,10 +141,11 @@ function func_serving_cls(){

 function func_serving_rec(){
-    LOG_PATH="test_tipc/output/${model_name}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/serving_infer"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
+    status_log="${LOG_PATH}/results_cpp_serving.log"
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
     cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")

@@ -181,16 +187,22 @@ function func_serving_rec(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
     set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
-    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_cls_log="${LOG_PATH}/cpp_trans_model_cls.log"
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
     eval ${cls_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"

     set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
     set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
-    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_det_log="${LOG_PATH}/cpp_trans_model_det.log"
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
     eval ${det_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"

     OLD_IFS="${IFS}"
     IFS='/'

@@ -225,32 +237,34 @@ function func_serving_rec(){
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [ ${use_gpu} = "null" ]; then
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-            web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
+            server_log_path="${LOG_PATH}/cpp_server_cpu.log"
+            web_service_cpp_cmd="nohup ${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 > ${server_log_path} 2>&1 &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         else
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
+            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
             web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" ${server_log_path}
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_gpu.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         fi
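Previously the serving tests launched the server with a bare "... &", so its output raced onto the test runner's terminal and was lost; the nohup form with an explicit redirect makes the server log a first-class artifact alongside the client log, and each gets its own status_check line. The lifecycle reduced to a runnable skeleton, assuming common_func.sh is sourced (a sleep stands in for paddle_serving_server.serve, an echo for the client):

    LOG_PATH="$(pwd)/demo_logs"; mkdir -p "${LOG_PATH}"
    status_log="${LOG_PATH}/results_cpp_serving.log"; model_name="demo"   # illustrative
    server_log_path="${LOG_PATH}/cpp_server_cpu.log"
    web_service_cmd="nohup sleep 30 > ${server_log_path} 2>&1 &"          # placeholder server
    eval ${web_service_cmd}
    # note: $? after launching a background job is 0 whether or not the server starts cleanly,
    # the same limitation as in the real scripts
    status_check $? "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
    sleep 5s
    _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
    eval "echo client-run > ${_save_log_path} 2>&1"                       # placeholder client
    status_check ${PIPESTATUS[0]} "client-cmd" "${status_log}" "${model_name}" "${_save_log_path}"
    kill $!   # the real scripts call `${python_} -m paddle_serving_server.serve stop`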
test_tipc/test_serving_infer_python.sh

@@ -36,13 +36,16 @@ web_service_py=$(func_parser_value "${lines[11]}")
 web_use_gpu_key=$(func_parser_key "${lines[12]}")
 web_use_gpu_list=$(func_parser_value "${lines[12]}")
 pipeline_py=$(func_parser_value "${lines[13]}")
+use_mkldnn="False"
+threads="1"

 function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}/${MODE}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
     IFS='|'

     # pdserving

@@ -54,8 +57,11 @@ function func_serving_cls(){
     for python_ in ${python[*]}; do
         if [[ ${python_} =~ "python" ]]; then
-            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            trans_log="${LOG_PATH}/python_trans_model.log"
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_log} 2>&1"
             eval ${trans_model_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
             break
         fi
     done

@@ -96,19 +102,19 @@ function func_serving_cls(){
         devices_line=27
         set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
         eval ${set_devices_cmd}
-        web_service_cmd="${python_} ${web_service_py} &"
+        server_log_path="${LOG_PATH}/python_server_cpu.log"
+        web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 &"
         eval ${web_service_cmd}
         last_status=${PIPESTATUS[0]}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
-            _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
             pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             sleep 5s
         done
         eval "${python_} -m paddle_serving_server.serve stop"

@@ -130,19 +136,19 @@ function func_serving_cls(){
             devices_line=27
             set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
             eval ${set_devices_cmd}
-            web_service_cmd="${python_} ${web_service_py} & "
+            server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
+            web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 & "
             eval ${web_service_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
             for pipeline in ${pipeline_py[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                _save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                 pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                 sleep 5s
             done
             eval "${python_} -m paddle_serving_server.serve stop"

@@ -154,10 +160,11 @@ function func_serving_cls(){

 function func_serving_rec(){
-    LOG_PATH="test_tipc/output/${model_name}/${MODE}"
+    CLS_ROOT_PATH=$(pwd)
+    LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
     mkdir -p ${LOG_PATH}
-    LOG_PATH="../../../${LOG_PATH}"
     status_log="${LOG_PATH}/results_serving.log"
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
     cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")

@@ -199,16 +206,22 @@ function func_serving_rec(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
     set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
-    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_cls_log="${LOG_PATH}/python_trans_model_cls.log"
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
     eval ${cls_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"

     set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
     set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
-    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_det_log="${LOG_PATH}/python_trans_model_det.log"
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
     eval ${det_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"

     # modify the alias_name of fetch_var to "outputs"
     server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"

@@ -239,19 +252,19 @@ function func_serving_rec(){
         devices_line=27
         set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
         eval ${set_devices_cmd}
-        web_service_cmd="${python} ${web_service_py} &"
+        server_log_path="${LOG_PATH}/python_server_cpu.log"
+        web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 &"
         eval ${web_service_cmd}
         last_status=${PIPESTATUS[0]}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         for pipeline in ${pipeline_py[*]}; do
-            _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
-            pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
+            _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+            pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             sleep 5s
         done
         eval "${python_} -m paddle_serving_server.serve stop"

@@ -273,19 +286,19 @@ function func_serving_rec(){
             devices_line=27
             set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
             eval ${set_devices_cmd}
-            web_service_cmd="${python} ${web_service_py} & "
+            server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
+            web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 & "
             eval ${web_service_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 10s
             for pipeline in ${pipeline_py[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                _save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                 pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                 sleep 10s
             done
             eval "${python_} -m paddle_serving_server.serve stop"
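The new top-level use_mkldnn="False" and threads="1" exist only so the client log names can carry the same fields as the python_infer_* logs elsewhere in TIPC, keeping one uniform naming scheme across suites. How a name is composed (the client script name is a representative example, not read from this diff):

    LOG_PATH="/tmp/serving_logs"; use_mkldnn="False"; threads="1"
    pipeline="pipeline_http_client.py"
    _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
    echo "${_save_log_path}"
    # /tmp/serving_logs/python_client_cpu_pipeline_http_usemkldnn_False_threads_1_batchsize_1.log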
test_tipc/test_train_inference_python.sh

@@ -95,7 +95,8 @@ if [[ $MODE = 'benchmark_train' ]]; then
     epoch_num=1
 fi
-LOG_PATH="./test_tipc/output/${model_name}/${MODE}"
+CLS_ROOT_PATH=$(pwd)
+LOG_PATH="${CLS_ROOT_PATH}/test_tipc/output/${model_name}/${MODE}"
 mkdir -p ${LOG_PATH}
 status_log="${LOG_PATH}/results_python.log"

@@ -107,13 +108,15 @@ function func_inference() {
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
             for use_mkldnn in ${use_mkldnn_list[*]}; do
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
+                        for precision in ${precision_list[*]}; do
+                            _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                             set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                             set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                             set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")

@@ -124,7 +127,8 @@ function func_inference() {
                             eval $command
                             last_status=${PIPESTATUS[0]}
                             eval "cat ${_save_log_path}"
-                            status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                            status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
+                        done
                     done
                 done
             done

@@ -135,7 +139,7 @@ function func_inference() {
                     continue
                 fi
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")

@@ -146,7 +150,7 @@ function func_inference() {
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done

@@ -161,17 +165,19 @@ if [[ ${MODE} = "whole_infer" ]]; then
     # for kl_quant
     if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then
         echo "kl_quant"
-        command="${python} ${kl_quant_cmd_value}"
+        log_path="${LOG_PATH}/export.log"
+        command="${python} ${kl_quant_cmd_value} > ${log_path} 2>&1"
         echo ${command}
         eval $command
         last_status=${PIPESTATUS[0]}
-        status_check $last_status "${command}" "${status_log}" "${model_name}"
+        status_check $last_status "${command}" "${status_log}" "${model_name}" "${log_path}"
         cd ${infer_model_dir_list}/quant_post_static_model
-        ln -s __model__ inference.pdmodel
-        ln -s __params__ inference.pdiparams
+        ln -s model.pdmodel inference.pdmodel
+        ln -s model.pdiparams inference.pdiparams
         cd ../../deploy
         is_quant=True
-        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        gpu=0
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "${LOG_PATH}" "${infer_img_dir}" "${is_quant}" "${gpu}"
         cd ..
     fi
 else

@@ -240,7 +246,7 @@ else
         if [ ${#ips} -le 15 ]; then
             # if length of ips >= 15, then it is seen as multi-machine
             # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
-            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_1"
             nodes=1
         else
             IFS=","

@@ -268,7 +274,8 @@ else
         # export FLAGS_cudnn_deterministic=True
         sleep 5
         eval $cmd
-        status_check $? "${cmd}" "${status_log}" "${model_name}"
+        eval "cat ${save_log}/${model_name}/train.log >> ${save_log}.log"
+        status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"
         sleep 5
         if [[ $FILENAME == *GeneralRecognition* ]]; then

@@ -283,9 +290,10 @@ else
         # run eval
         if [ ${eval_py} != "null" ]; then
             set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
-            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
+            eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
+            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1"
             eval $eval_cmd
-            status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
+            status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
             sleep 5
         fi
         # run export model

@@ -298,15 +306,16 @@ else
                 set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
             fi
             set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
-            export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
+            export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
+            export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1"
             eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"

-            #run inference
+            # run inference
             eval $env
             save_infer_path="${save_log}"
             cd deploy
-            func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}"
+            func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" "${gpu}"
             cd ..
         fi
         eval "unset CUDA_VISIBLE_DEVICES"
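Threading the new seventh argument _gpu into func_inference stamps the GPU id into every inference log name, so runs on different devices no longer overwrite one another, which is why both call sites now pass "${gpu}". One caveat worth knowing when reading results_python.log: the training status_check now runs after the "cat ... >> ${save_log}.log" concatenation, so the $? it records is the cat's exit status, not the training command's. Name composition, for reference (values illustrative):

    _log_path="/tmp/train_logs"; _gpu="0"
    use_trt="False"; precision="fp32"; batch_size="1"
    _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
    echo "${_save_log_path}"
    # /tmp/train_logs/python_infer_gpu_gpus_0_usetrt_False_precision_fp32_batchsize_1.log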