s920243400 / PaddleDetection (forked from PaddlePaddle / PaddleDetection)
Commit 25e858cb (unverified)

add log_path in result.log (#6668)

Authored by zhengya01 on Aug 17, 2022; committed via GitHub on Aug 17, 2022.
Parent: 28199de7

Showing 7 changed files with 30 additions and 27 deletions.
Changed files:
- test_tipc/test_inference_cpp.sh (+3, -3)
- test_tipc/test_paddle2onnx.sh (+3, -3)
- test_tipc/test_ptq_inference_python.sh (+3, -3)
- test_tipc/test_serving_infer_cpp.sh (+3, -3)
- test_tipc/test_serving_infer_python.sh (+3, -3)
- test_tipc/test_train_inference_python.sh (+12, -10)
- test_tipc/utils_func.sh (+3, -2)
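All seven files apply one pattern: each tested step already redirects its output to a per-step log file, and this commit threads that path into `status_check` as a new fifth argument so that result.log records where the detailed output lives. A minimal sketch of the shared pattern, with illustrative names (none of them verbatim from the scripts):

```bash
#!/usr/bin/env bash
# Shared TIPC reporting pattern; the command, paths, and model name are
# illustrative stand-ins, not values from the actual configs.
cmd="python tools/infer.py"
log="./test_tipc/output/infer.log"            # per-step log
status_log="./test_tipc/output/results.log"   # aggregate result log
model_name="yolov3"

eval "${cmd} > ${log} 2>&1"   # run the step, capturing stdout+stderr
last_status=$?
cat "${log}"                  # replay the captured output on the console

# New in this commit: the per-step log path rides along as argument 5.
status_check ${last_status} "${cmd}" "${status_log}" "${model_name}" "${log}"
```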
test_tipc/test_inference_cpp.sh

```diff
@@ -86,7 +86,7 @@ function func_cpp_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -112,7 +112,7 @@ function func_cpp_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     else
@@ -209,7 +209,7 @@ for infer_mode in ${cpp_infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi
     #run inference
```
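These hunks capture the exit status with `last_status=${PIPESTATUS[0]}` right after `eval $command`. For a plain command this is interchangeable with `$?`; the two differ only when the evaluated command is a pipeline, where `$?` reports the final stage. A minimal illustration:

```bash
#!/usr/bin/env bash
# $? reports the last stage of a pipeline; PIPESTATUS[0] the first.
false | true
# Both expansions below refer to the `false | true` pipeline, because
# they are evaluated before echo itself runs.
echo "last stage: $?  first stage: ${PIPESTATUS[0]}"   # last stage: 0  first stage: 1
```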
test_tipc/test_paddle2onnx.sh

```diff
@@ -81,7 +81,7 @@ function func_paddle2onnx_inference(){
     eval "${trans_model_cmd} > ${trans_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     cat ${trans_log_path}
-    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${trans_log_path}"
     # python inference
     echo "################### run onnx infer ###################"
@@ -94,7 +94,7 @@ function func_paddle2onnx_inference(){
     eval "${infer_model_cmd} > ${_save_log_path} 2>&1"
     last_status=${PIPESTATUS[0]}
     cat ${_save_log_path}
-    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+    status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${_save_log_path}"
 }

 export Count=0
@@ -120,7 +120,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi
     #run inference
```
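`trans_model_cmd` here wraps the Paddle2ONNX converter, with stdout and stderr captured into `trans_log_path`. For orientation, a hedged sketch of the kind of command it expands to; the real flags and paths come from the TIPC config file, and the model directory below is illustrative:

```bash
# Illustrative Paddle2ONNX invocation; flags and paths are assumptions —
# the actual command is assembled from the TIPC config, not this sketch.
paddle2onnx --model_dir ./output_inference/yolov3_darknet53_270e_coco \
            --model_filename model.pdmodel \
            --params_filename model.pdiparams \
            --save_file ./output_inference/model.onnx \
            --opset_version 11 \
            > ./test_tipc/output/paddle2onnx_trans.log 2>&1
```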
test_tipc/test_ptq_inference_python.sh

```diff
@@ -72,7 +72,7 @@ function func_ptq_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     done
@@ -87,7 +87,7 @@ function func_ptq_inference(){
             eval $command
             last_status=${PIPESTATUS[0]}
             eval "cat ${_save_log_path}"
-            status_check $last_status "${command}" "${status_log}" "${model_name}"
+            status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
         done
     fi
 done
@@ -108,7 +108,7 @@ echo $ptq_cmd
 eval "${ptq_cmd} > ${export_log_path} 2>&1"
 status_export=$?
 cat ${export_log_path}
-status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}"
+status_check $status_export "${ptq_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
 #run inference
 set_export_model_dir=$(func_set_params "${model_dir_key}" "${save_export_value}/${model_name}")
```
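The last context line above builds `set_export_model_dir` with `func_set_params`, the helper from utils_func.sh that turns a config key/value pair into a CLI fragment and swallows `null` placeholders. A rough sketch of the idea (the real implementation may differ in detail):

```bash
# Rough sketch of func_set_params; the real helper lives in
# test_tipc/utils_func.sh and may differ in detail.
function func_set_params(){
    key=$1
    value=$2
    if [ "${key}" = "null" ] || [ "${value}" = "null" ] || [ -z "${value}" ]; then
        echo " "                  # config marks this parameter as unused
    else
        echo "${key}=${value}"    # e.g. --output_dir=./output_inference
    fi
}
```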
test_tipc/test_serving_infer_cpp.sh

```diff
@@ -80,14 +80,14 @@ function func_serving_inference(){
         eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         cat ${server_log_path}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         # run http client
         http_client_cmd="${_python} ${http_client_py} ${_set_client_model_dir} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
         last_status=${PIPESTATUS[0]}
         cat ${client_log_path}
-        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
         ps ux | grep -i ${port_value} | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi
     #run inference
```
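The teardown step above SIGKILLs whatever `ps ux` line matches the port value, which can also match unrelated processes whose command line merely contains that string. A hypothetical, more targeted variant, assuming `lsof` is available on the test machine:

```bash
# Hypothetical teardown: kill only the PID actually listening on the
# port, instead of pattern-matching ps output.
port_value=9292     # illustrative; the script reads it from the config
lsof -t -i :"${port_value}" | xargs -r kill -9
```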
test_tipc/test_serving_infer_python.sh

```diff
@@ -71,14 +71,14 @@ function func_serving_inference(){
         eval $web_service_cmd
         last_status=${PIPESTATUS[0]}
         cat ${server_log_path}
-        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${web_service_cmd}" "${status_log}" "${model_name}" "${server_log_path}"
         sleep 5s
         # run http client
         http_client_cmd="${_python} ${_client_script} ${_set_image_file} ${set_http_client_params1} > ${client_log_path} 2>&1"
         eval $http_client_cmd
         last_status=${PIPESTATUS[0]}
         cat ${client_log_path}
-        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${http_client_cmd}" "${status_log}" "${model_name}" "${client_log_path}"
         ps ux | grep -E 'web_service' | awk '{print $2}' | xargs kill -s 9
         sleep 2s
     done
@@ -117,7 +117,7 @@ for infer_mode in ${infer_mode_list[*]}; do
         eval "${export_cmd} > ${export_log_path} 2>&1"
         status_export=$?
         cat ${export_log_path}
-        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
+        status_check $status_export "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
     fi
     #run inference
```
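As in the C++ serving test, a fixed `sleep 5s` stands between starting the web service and running the client. Where that proves flaky, one could poll for readiness instead; a hedged sketch, with the URL and retry budget as assumptions:

```bash
# Hypothetical readiness poll replacing a fixed `sleep 5s`; the URL and
# the 30-second budget are assumptions, not values from the script.
for _ in $(seq 1 30); do
    if curl -s -o /dev/null "http://127.0.0.1:18093/"; then
        break           # server is accepting connections
    fi
    sleep 1
done
```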
test_tipc/test_train_inference_python.sh

```diff
@@ -105,6 +105,7 @@ function func_inference(){
     _log_path=$4
     _img_dir=$5
     _flag_quant=$6
+    _gpu=$7
     # inference
     for use_gpu in ${use_gpu_list[*]}; do
         if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then
@@ -114,7 +115,7 @@ function func_inference(){
             fi
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
+                    _save_log_path="${_log_path}/python_infer_cpu_gpus_${gpu}_usemkldnn_${use_mkldnn}_threads_${threads}_mode_paddle_batchsize_${batch_size}.log"
                     set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                     set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                     set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -125,7 +126,7 @@ function func_inference(){
                     eval $command
                     last_status=${PIPESTATUS[0]}
                     eval "cat ${_save_log_path}"
-                    status_check $last_status "${command}" "${status_log}" "${model_name}"
+                    status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
                 done
             done
         done
@@ -140,7 +141,7 @@ function func_inference(){
                 fi
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/python_infer_gpu_mode_${precision}_batchsize_${batch_size}.log"
+                _save_log_path="${_log_path}/python_infer_gpu_gpus_${gpu}_mode_${precision}_batchsize_${batch_size}.log"
                 set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}")
                 set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}")
                 set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}")
@@ -151,7 +152,7 @@ function func_inference(){
                 eval $command
                 last_status=${PIPESTATUS[0]}
                 eval "cat ${_save_log_path}"
-                status_check $last_status "${command}" "${status_log}" "${model_name}"
+                status_check $last_status "${command}" "${status_log}" "${model_name}" "${_save_log_path}"
             done
         done
     else
@@ -171,6 +172,7 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
     eval $env
     Count=0
+    gpu=0
     IFS="|"
     infer_quant_flag=(${infer_is_quant_list})
     for infer_mode in ${infer_mode_list[*]}; do
@@ -198,12 +200,12 @@ if [ ${MODE} = "whole_infer" ] || [ ${MODE} = "klquant_whole_infer" ]; then
             export_cmd="${python} ${run_export} ${set_export_weight} ${set_filename} ${set_save_export_dir} "
             echo $export_cmd
             eval $export_cmd
             status_check $? "${export_cmd}" "${status_log}" "${model_name}"
         fi
         #run inference
         save_export_model_dir="${save_export_value}/${model_name}"
         is_quant=${infer_quant_flag[Count]}
-        func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
+        func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} "{gpu}"
         Count=$((${Count} + 1))
     done
 else
@@ -304,7 +306,7 @@ else
         eval "${cmd} > ${train_log_path} 2>&1"
         last_status=$?
         cat ${train_log_path}
-        status_check $last_status "${cmd}" "${status_log}" "${model_name}"
+        status_check $last_status "${cmd}" "${status_log}" "${model_name}" "${train_log_path}"
         set_eval_trained_weight=$(func_set_params "${export_weight_key}" "${save_log}/${model_name}/${train_model_name}")
         # run eval
@@ -315,7 +317,7 @@ else
             eval "${eval_cmd} > ${eval_log_path} 2>&1"
             last_status=$?
             cat ${eval_log_path}
-            status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${eval_cmd}" "${status_log}" "${model_name}" "${eval_log_path}"
         fi
         # run export model
         if [ ${run_export} != "null" ]; then
@@ -336,7 +338,7 @@ else
             eval "${export_cmd} > ${export_log_path} 2>&1"
             last_status=$?
             cat ${export_log_path}
-            status_check $last_status "${export_cmd}" "${status_log}" "${model_name}"
+            status_check $last_status "${export_cmd}" "${status_log}" "${model_name}" "${export_log_path}"
             #run inference
             if [ ${export_onnx_key} != "export_onnx" ]; then
@@ -344,7 +346,7 @@ else
                 eval "cp ${save_export_model_dir}/* ${save_log}/"
             fi
             eval $env
-            func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}"
+            func_inference "${python}" "${inference_py}" "${save_export_model_dir}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" "{gpu}"
             eval "unset CUDA_VISIBLE_DEVICES"
         fi
```
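Two details of this diff are worth flagging exactly as committed: `func_inference` is passed the literal string "{gpu}" (note the missing `$`), so the new `_gpu=$7` parameter receives the text `{gpu}` rather than a device id; and the new log-file names interpolate `${gpu}`, the variable set at the call site, rather than the local `_gpu`. A minimal demonstration of the quoting difference:

```bash
#!/usr/bin/env bash
# Why "{gpu}" and "${gpu}" behave differently as arguments.
gpu=0
show_arg(){ echo "received: $1"; }
show_arg "{gpu}"    # prints: received: {gpu}  (braces are literal text)
show_arg "${gpu}"   # prints: received: 0      (normal expansion)
```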
test_tipc/utils_func.sh

```diff
@@ -51,9 +51,10 @@ function status_check(){
     run_command=$2
     run_log=$3
     model_name=$4
+    log_path=$5
     if [ $last_status -eq 0 ]; then
-        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     else
-        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}!  \033[0m" | tee -a ${run_log}
+        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command} - ${log_path} \033[0m" | tee -a ${run_log}
     fi
 }
```
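With the new fifth parameter, every line `status_check` appends to the aggregate result log now ends with the path of the step's detailed log, which is the point of the commit. A hedged usage sketch with illustrative paths:

```bash
# Hedged example call; paths and the model name are illustrative.
status_check 0 "python tools/infer.py" "./test_tipc/output/results.log" \
             "yolov3" "./test_tipc/output/python_infer_gpu_gpus_0_batchsize_1.log"
# Prints (in yellow, via the \033[33m ANSI code) and appends to results.log:
#  Run successfully with command - yolov3 - python tools/infer.py - ./test_tipc/output/python_infer_gpu_gpus_0_batchsize_1.log
```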