PaddlePaddle / PaddleClas

Commit b3d26a98
Authored Sep 05, 2022 by gaotingquan
tipc: fix log path
Parent: 44b55fb6
Showing 6 changed files with 116 additions and 81 deletions (+116 −81)
test_tipc/common_func.sh                   +3   -2
test_tipc/test_inference_cpp.sh            +4   -5
test_tipc/test_paddle2onnx.sh              +6   -4
test_tipc/test_serving_infer_cpp.sh        +34  -21
test_tipc/test_serving_infer_python.sh     +35  -24
test_tipc/test_train_inference_python.sh   +34  -25
test_tipc/common_func.sh

@@ -77,9 +77,10 @@ function status_check(){
     run_command=$2
     run_log=$3
     model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}!\033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path}!\033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}!\033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path}!\033[0m" | tee -a ${run_log}
     fi
 }
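Taken together with the call-site changes below, the new fifth argument threads each run's log file into the pass/fail message, so a failure recorded in results_*.log points straight at the log to open. A minimal sketch of the updated calling convention; the model name, script, and paths here are illustrative placeholders, not values from this commit:

    #!/bin/bash
    source test_tipc/common_func.sh

    model_name="MobileNetV3_large_x1_0"              # placeholder model
    status_log="test_tipc/output/results_infer.log"  # aggregate pass/fail log
    _save_log_path="test_tipc/output/python_infer_cpu_batchsize_1.log"

    command="python3 deploy/python/predict_cls.py > ${_save_log_path} 2>&1"
    eval $command
    last_status=${PIPESTATUS[0]}   # exit code of the command just eval'd

    # The fifth argument is new in this commit: the per-run log path,
    # which status_check now echoes into its success/failure line.
    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"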
test_tipc/test_inference_cpp.sh

@@ -70,7 +70,7 @@ function func_shitu_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
@@ -94,7 +94,7 @@ function func_shitu_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
@@ -126,13 +126,12 @@ function func_cls_cpp_inference(){
                     precison="int8"
                 fi
                 _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                 command="${generate_yaml_cmd} --type cls --batch_size ${batch_size} --mkldnn ${use_mkldnn} --gpu ${use_gpu} --cpu_thread ${threads} --tensorrt False --precision ${precision} --data_dir ${_img_dir} --benchmark True --cls_model_dir ${cpp_infer_model_dir} --gpu_id ${GPUID}"
                 eval $command
                 command1="${_script} > ${_save_log_path} 2>&1"
                 eval ${command1}
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command1}" "${status_log}" "${model_name}"
+                status_check $last_status "${command1}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
@@ -155,7 +154,7 @@ function func_cls_cpp_inference(){
                 command="${_script} > ${_save_log_path} 2>&1"
                 eval $command
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
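Every inference run in these scripts follows one idiom: build the command string with its output redirected into a parameter-stamped log, eval it, then grab the exit code from ${PIPESTATUS[0]} before any later command overwrites it. A stripped-down sketch of the idiom, with an illustrative binary and config name:

    #!/bin/bash
    _log_path="test_tipc/output"                 # illustrative directory
    use_mkldnn="True"; threads=4; precision="fp32"; batch_size=1

    # Encode every swept parameter into the log name so runs never collide.
    _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"

    command="./build/clas_system -c inference_cls.yaml > ${_save_log_path} 2>&1"
    eval $command
    # ${PIPESTATUS[0]} is the exit status of the first command in the
    # last pipeline; plain $? would be lost once another command runs.
    last_status=${PIPESTATUS[0]}
    echo "exit=${last_status}, log=${_save_log_path}"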
test_tipc/test_paddle2onnx.sh

@@ -46,27 +46,29 @@ function func_paddle2onnx(){
     _script=$1
     # paddle2onnx
-    _save_log_path=".${LOG_PATH}/paddle2onnx_infer_cpu.log"
     set_dirname=$(func_set_params "${infer_model_dir_key}" "${infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_save_model=$(func_set_params "${save_file_key}" "${save_file_value}")
     set_opset_version=$(func_set_params "${opset_version_key}" "${opset_version_value}")
     set_enable_onnx_checker=$(func_set_params "${enable_onnx_checker_key}" "${enable_onnx_checker_value}")
-    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False"
+    trans_log="${LOG_PATH}/trans_model.log"
+    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker} --enable_dev_version=False > ${trans_log} 2>&1"
     eval $trans_model_cmd
     last_status=${PIPESTATUS[0]}
-    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
     # python inference
     if [[ ${inference_py} != "null" ]]; then
+        _save_log_path=".${LOG_PATH}/paddle2onnx_infer_cpu.log"
         set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
         set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
         set_hardware=$(func_set_params "${inference_hardware_key}" "${inference_hardware_value}")
         set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
         infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
         eval $infer_model_cmd
-        status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
     fi
 }
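Giving the conversion step its own trans_model.log separates paddle2onnx failures from the Python inference log that previously absorbed both stages. A hedged sketch of the conversion step in isolation; the model paths are illustrative, and the flag names mirror the option keys used in the diff above:

    #!/bin/bash
    LOG_PATH="test_tipc/output"                  # illustrative
    mkdir -p ${LOG_PATH}
    trans_log="${LOG_PATH}/trans_model.log"

    # Same shape as trans_model_cmd above: conversion output goes to its
    # own log instead of sharing _save_log_path with the inference step.
    trans_model_cmd="paddle2onnx --model_dir=./inference \
        --model_filename=inference.pdmodel \
        --params_filename=inference.pdiparams \
        --save_file=./inference/model.onnx \
        --opset_version=10 \
        --enable_onnx_checker=True \
        --enable_dev_version=False > ${trans_log} 2>&1"
    eval $trans_model_cmd
    echo "paddle2onnx exit=${PIPESTATUS[0]}, see ${trans_log}"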
test_tipc/test_serving_infer_cpp.sh

@@ -38,10 +38,10 @@ pipeline_py=$(func_parser_value "${lines[13]}")
 function func_serving_cls(){
-    LOG_PATH="test_tipc/output/${model_name}"
+    LOG_PATH="test_tipc/output/${model_name}/${MODE}/cpp"
     mkdir -p ${LOG_PATH}
     LOG_PATH="../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
+    status_log="${LOG_PATH}/results_cpp_serving.log"
     IFS='|'
     # pdserving
@@ -53,8 +53,11 @@ function func_serving_cls(){
     for python_ in ${python[*]}; do
         if [[ ${python_} =~ "python" ]]; then
-            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            trans_log="${LOG_PATH}/cpp_trans_model.log"
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_log} 2>&1"
             eval ${trans_model_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
             break
         fi
     done
@@ -102,32 +105,34 @@ function func_serving_cls(){
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [[ ${use_gpu} = "null" ]]; then
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 &"
+            server_log_path="${LOG_PATH}/cpp_server_cpu.log"
+            web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 > ${server_log_path} 2>&1 &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         else
-            web_service_cpp_cmd="${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} &"
+            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
+            web_service_cpp_cmd="nohup ${python_} -m paddle_serving_server.serve --model ${serving_server_dir_name} --op GeneralClasOp --port 9292 --gpu_id=${use_gpu} > ${server_log_path} 2>&1 &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 8s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_pipeline_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_gpu.log"
             pipeline_cmd="${python_} test_cpp_serving_client.py > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             sleep 5s
             eval "${python_} -m paddle_serving_server.serve stop"
         fi
@@ -139,7 +144,7 @@ function func_serving_rec(){
     LOG_PATH="test_tipc/output/${model_name}"
     mkdir -p ${LOG_PATH}
     LOG_PATH="../../../${LOG_PATH}"
-    status_log="${LOG_PATH}/results_serving.log"
+    status_log="${LOG_PATH}/results_cpp_serving.log"
     trans_model_py=$(func_parser_value "${lines[5]}")
     cls_infer_model_dir_key=$(func_parser_key "${lines[6]}")
     cls_infer_model_dir_value=$(func_parser_value "${lines[6]}")
@@ -181,16 +186,22 @@ function func_serving_rec(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
     set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
-    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_cls_log="${LOG_PATH}/cpp_trans_model_cls.log"
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
     eval ${cls_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"
     set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
     set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
-    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_det_log="${LOG_PATH}/cpp_trans_model_det.log"
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
     eval ${det_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
     OLD_IFS="${IFS}"
     IFS='/'
@@ -225,32 +236,34 @@ function func_serving_rec(){
     for use_gpu in ${web_use_gpu_list[*]}; do
         if [ ${use_gpu} = "null" ]; then
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
-            web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 &"
+            server_log_path="${LOG_PATH}/cpp_server_cpu.log"
+            web_service_cpp_cmd="nohup ${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 > ${server_log_path} 2>&1 &"
            eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_cpu.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         else
             det_serving_server_dir_name=$(func_get_url_file_name "$det_serving_server_value")
+            server_log_path="${LOG_PATH}/cpp_server_gpu.log"
             web_service_cpp_cmd="${python_interp} -m paddle_serving_server.serve --model ../../${det_serving_server_value} ../../${cls_serving_server_value} --op GeneralPicodetOp GeneralFeatureExtractOp --port 9400 --gpu_id=${use_gpu} &"
             eval ${web_service_cpp_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cpp_cmd}" "${status_log}" "${model_name}" ${server_log_path}
             sleep 5s
-            _save_log_path="${LOG_PATH}/server_infer_cpp_gpu_batchsize_1.log"
+            _save_log_path="${LOG_PATH}/cpp_client_gpu.log"
             pipeline_cmd="${python_interp} ${pipeline_py} > ${_save_log_path} 2>&1 "
             eval ${pipeline_cmd}
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}"
+            status_check ${last_status} "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
             eval "${python_} -m paddle_serving_server.serve stop"
             sleep 5s
         fi
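The server launches switch from a bare background & to nohup ... > log 2>&1 &, so the serving process is detached from the terminal and its startup output (port conflicts, missing model files) lands in a file that status_check can point at. A minimal sketch of the pattern, with an illustrative model directory and port:

    #!/bin/bash
    LOG_PATH="test_tipc/output"                  # illustrative
    mkdir -p ${LOG_PATH}
    server_log_path="${LOG_PATH}/cpp_server_cpu.log"

    # nohup detaches the server from the terminal; the redirect keeps its
    # startup errors inspectable after the test script has moved on.
    web_service_cmd="nohup python3 -m paddle_serving_server.serve \
        --model serving_server --op GeneralClasOp --port 9292 \
        > ${server_log_path} 2>&1 &"
    eval ${web_service_cmd}
    echo "server pid=$!, log=${server_log_path}"

    sleep 5s        # give the server time to bind the port
    # ... run the client here, then shut the server down:
    python3 -m paddle_serving_server.serve stop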
test_tipc/test_serving_infer_python.sh

@@ -54,8 +54,11 @@ function func_serving_cls(){
     for python_ in ${python[*]}; do
         if [[ ${python_} =~ "python" ]]; then
-            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+            trans_log="${LOG_PATH}/python_trans_model.log"
+            trans_model_cmd="${python_} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
             eval ${trans_model_cmd}
+            last_status=${PIPESTATUS[0]}
+            status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log}"
             break
         fi
     done
@@ -96,19 +99,20 @@ function func_serving_cls(){
             devices_line=27
             set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
             eval ${set_devices_cmd}
-            web_service_cmd="${python_} ${web_service_py} &"
+            server_log_path="${LOG_PATH}/python_server_cpu.log"
+            web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 &"
             eval ${web_service_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
             for pipeline in ${pipeline_py[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
+                # TODO
+                _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
                 pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1 "
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                 sleep 5s
             done
             eval "${python_} -m paddle_serving_server.serve stop"
@@ -130,19 +134,19 @@ function func_serving_cls(){
                 devices_line=27
                 set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
                 eval ${set_devices_cmd}
-                web_service_cmd="${python_} ${web_service_py} &"
+                server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
+                web_service_cmd="nohup ${python_} ${web_service_py} > ${server_log_path} 2>&1 &"
                 eval ${web_service_cmd}
                 last_status=${PIPESTATUS[0]}
-                status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
                 sleep 5s
                 for pipeline in ${pipeline_py[*]}; do
-                    _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                    _save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                     pipeline_cmd="${python_} ${pipeline} > ${_save_log_path} 2>&1"
                     eval ${pipeline_cmd}
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                    status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                     sleep 5s
                 done
                 eval "${python_} -m paddle_serving_server.serve stop"
@@ -199,16 +203,22 @@ function func_serving_rec(){
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${cls_serving_server_key}" "${cls_serving_server_value}")
     set_serving_client=$(func_set_params "${cls_serving_client_key}" "${cls_serving_client_value}")
-    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_cls_log="${LOG_PATH}/python_trans_model_cls.log"
+    cls_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_cls_log} 2>&1"
     eval ${cls_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${cls_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_cls_log}"
     set_dirname=$(func_set_params "${det_infer_model_dir_key}" "${det_infer_model_dir_value}")
     set_model_filename=$(func_set_params "${model_filename_key}" "${model_filename_value}")
     set_params_filename=$(func_set_params "${params_filename_key}" "${params_filename_value}")
     set_serving_server=$(func_set_params "${det_serving_server_key}" "${det_serving_server_value}")
     set_serving_client=$(func_set_params "${det_serving_client_key}" "${det_serving_client_value}")
-    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client}"
+    trans_det_log="${LOG_PATH}/python_trans_model_det.log"
+    det_trans_model_cmd="${python_interp} ${trans_model_py} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_serving_server} ${set_serving_client} > ${trans_det_log} 2>&1"
     eval ${det_trans_model_cmd}
+    last_status=${PIPESTATUS[0]}
+    status_check $last_status "${det_trans_model_cmd}" "${status_log}" "${model_name}" "${trans_det_log}"
     # modify the alias_name of fetch_var to "outputs"
     server_fetch_var_line_cmd="sed -i '/fetch_var/,/is_lod_tensor/s/alias_name: .*/alias_name: \"features\"/' $cls_serving_server_value/serving_server_conf.prototxt"
@@ -239,19 +249,20 @@ function func_serving_rec(){
             devices_line=27
             set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"\"/' config.yml"
             eval ${set_devices_cmd}
-            web_service_cmd="${python} ${web_service_py} &"
+            server_log_path="${LOG_PATH}/python_server_cpu.log"
+            web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 &"
             eval ${web_service_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 5s
             for pipeline in ${pipeline_py[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_cpu_${pipeline%_client*}_batchsize_1.log"
-                pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1 "
+                # TODO
+                _save_log_path="${LOG_PATH}/python_client_cpu_${pipeline%_client*}_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_1.log"
+                pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                 sleep 5s
             done
             eval "${python_} -m paddle_serving_server.serve stop"
@@ -273,19 +284,19 @@ function func_serving_rec(){
             devices_line=27
             set_devices_cmd="sed -i '${devices_line}s/devices: .*/devices: \"${use_gpu}\"/' config.yml"
             eval ${set_devices_cmd}
-            web_service_cmd="${python} ${web_service_py} &"
+            server_log_path="${LOG_PATH}/python_server_gpu_usetrt_${use_trt}_precision_${precision}.log"
+            web_service_cmd="nohup ${python} ${web_service_py} > ${server_log_path} 2>&1 &"
             eval ${web_service_cmd}
             last_status=${PIPESTATUS[0]}
-            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
             sleep 10s
             for pipeline in ${pipeline_py[*]}; do
-                _save_log_path="${LOG_PATH}/server_infer_gpu_${pipeline%_client*}_batchsize_1.log"
+                _save_log_path="${LOG_PATH}/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
                 pipeline_cmd="${python} ${pipeline} > ${_save_log_path} 2>&1"
                 eval ${pipeline_cmd}
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
+                status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
                 sleep 10s
             done
             eval "${python_} -m paddle_serving_server.serve stop"
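The new client log names rely on bash suffix stripping: ${pipeline%_client*} deletes the shortest trailing match of _client*, reducing a client script name to a short tag. A quick illustration with made-up values:

    #!/bin/bash
    pipeline="pipeline_http_client.py"
    # %_client* strips the shortest suffix matching "_client*",
    # leaving just the transport part of the script name.
    echo "${pipeline%_client*}"     # -> pipeline_http

    use_trt="False"; precision="fp32"
    _save_log_path="output/python_client_gpu_${pipeline%_client*}_usetrt_${use_trt}_precision_${precision}_batchsize_1.log"
    echo "${_save_log_path}"
    # -> output/python_client_gpu_pipeline_http_usetrt_False_precision_fp32_batchsize_1.log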
test_tipc/test_train_inference_python.sh

@@ -107,13 +107,15 @@ function func_inference() {
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
             for use_mkldnn in ${use_mkldnn_list[*]}; do
                 for threads in ${cpu_threads_list[*]}; do
                     for batch_size in ${batch_size_list[*]}; do
-                        _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
+                        for precision in ${precision_list[*]}; do
+                            _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                         set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                         set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                         set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -124,7 +126,8 @@ function func_inference() {
                             eval $command
                             last_status=${PIPESTATUS[0]}
                             eval "cat ${_save_log_path}"
-                        status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                            status_check $last_status "${command}" "../${status_log}" "${model_name}" "${_save_log_path}"
+                        done
                     done
                 done
             done
@@ -135,7 +138,7 @@ function func_inference() {
                     continue
                 fi
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/python_infer_gpu_gpus_${_gpu}_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -146,7 +149,7 @@ function func_inference() {
                     eval $command
                     last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "../${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "../${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -161,17 +164,20 @@ if [[ ${MODE} = "whole_infer" ]]; then
     # for kl_quant
     if [ ${kl_quant_cmd_value} != "null" ] && [ ${kl_quant_cmd_value} != "False" ]; then
         echo "kl_quant"
-        command="${python} ${kl_quant_cmd_value}"
+        # TODO
+        log_path="${LOG_PATH}_.log"
+        command="${python} ${kl_quant_cmd_value} > ${log_path} 2>&1"
         echo ${command}
         eval $command
         last_status=${PIPESTATUS[0]}
-        status_check $last_status "${command}" "${status_log}" "${model_name}"
+        status_check $last_status "${command}" "${status_log}" "${model_name}" "${log_path}"
         cd ${infer_model_dir_list}/quant_post_static_model
         ln -s __model__ inference.pdmodel
         ln -s __params__ inference.pdiparams
         cd ../../deploy
         is_quant=True
-        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        gpu=0
+        func_inference "${python}" "${inference_py}" "../${infer_model_dir_list}/quant_post_static_model" "../${LOG_PATH}" "${infer_img_dir}" "${is_quant}" "${gpu}"
         cd ..
     fi
 else
@@ -240,7 +246,7 @@ else
         if [ ${#ips} -le 15 ]; then
             # if length of ips >= 15, then it is seen as multi-machine
             # 15 is the min length of ips info for multi-machine: 0.0.0.0,0.0.0.0
-            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}"
+            save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_1"
             nodes=1
         else
             IFS=","
@@ -268,7 +274,8 @@ else
             # export FLAGS_cudnn_deterministic=True
             sleep 5
             eval $cmd
-            status_check $? "${cmd}" "${status_log}" "${model_name}"
+            eval "cat ${save_log}/train.log >> ${save_log}.log"
+            status_check $? "${cmd}" "${status_log}" "${model_name}" "${save_log}.log"
             sleep 5
             if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -283,9 +290,10 @@ else
             # run eval
             if [ ${eval_py} != "null" ]; then
                 set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
-                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
+                eval_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_eval.log"
+                eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1} > ${eval_log_path} 2>&1"
                 eval $eval_cmd
-                status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
+                status_check $? "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
                 sleep 5
             fi
             # run export model
@@ -298,15 +306,16 @@ else
                 set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${model_name}/${train_model_name}")
             fi
             set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
-            export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
+            export_log_path="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}_nodes_${nodes}_export.log"
+            export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key} > ${export_log_path} 2>&1"
             eval $export_cmd
-            status_check $? "${export_cmd}" "${status_log}" "${model_name}"
+            status_check $? "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
-            #run inference
+            # run inference
             eval $env
             save_infer_path="${save_log}"
             cd deploy
-            func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}"
+            func_inference "${python}" "${inference_py}" "../${save_infer_path}" "../${LOG_PATH}" "${infer_img_dir}" "${flag_quant}" "${gpu}"
             cd ..
         fi
         eval "unset CUDA_VISIBLE_DEVICES"
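func_inference now receives the GPU ids as a seventh argument and adds a precision loop to the CPU sweep, so every log name encodes the full (gpus, mkldnn, threads, precision, batch size) combination and parallel runs can no longer clobber each other's logs. A reduced sketch of the CPU sweep; the list values are illustrative:

    #!/bin/bash
    _log_path="test_tipc/output"; _gpu="0"       # illustrative
    use_mkldnn_list=("True" "False")
    cpu_threads_list=(1 6)
    batch_size_list=(1 16)
    precision_list=("fp32")

    for use_mkldnn in ${use_mkldnn_list[*]}; do
        for threads in ${cpu_threads_list[*]}; do
            for batch_size in ${batch_size_list[*]}; do
                for precision in ${precision_list[*]}; do   # new innermost loop
                    _save_log_path="${_log_path}/python_infer_cpu_gpus_${_gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
                    echo "would run inference, logging to ${_save_log_path}"
                done
            done
        done
    done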