PaddlePaddle / PaddleClas
Commit e0a4307a
Authored on Jun 09, 2022 by HydrogenSulfate

update status_check function

Parent: 1dee07fa
Showing 7 changed files with 24 additions and 24 deletions (+24 / -24)
test_tipc/benchmark_train.sh              +2  -2
test_tipc/test_inference_cpp.sh           +4  -4
test_tipc/test_inference_jeston.sh        +1  -1
test_tipc/test_lite_arm_cpu_cpp.sh        +1  -1
test_tipc/test_paddle2onnx.sh             +2  -2
test_tipc/test_serving_infer.sh           +8  -8
test_tipc/test_train_inference_python.sh  +6  -6
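Every one of the 24 changed lines makes the same edit: a call to status_check gains a trailing "${model_name}" argument, so the status log can attribute each command's success or failure to a concrete model. The helper itself is defined outside this diff (in test_tipc/common_func.sh). The following is only a minimal sketch of a 4-argument status_check consistent with these call sites; the exact log message format is an assumption, not taken from this commit.

# Sketch only: a status_check signature consistent with the updated call sites.
# The real implementation lives in test_tipc/common_func.sh; the message format
# below is assumed, not copied from the repository.
function status_check(){
    last_status=$1   # exit code of the command that was just run
    run_command=$2   # the command string, recorded in the log
    run_log=$3       # status log file to append to
    model_name=$4    # new argument added by this commit
    if [ $last_status -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}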
test_tipc/benchmark_train.sh

@@ -225,7 +225,7 @@ for batch_size in ${batch_size_list[*]}; do
            echo $cmd
            eval $cmd
            last_status=${PIPESTATUS[0]}
-           status_check $last_status "${cmd}" "${status_log}"
+           status_check $last_status "${cmd}" "${status_log}" "${model_name}"
        else
            IFS=";"
            unset_env=`unset CUDA_VISIBLE_DEVICES`

@@ -261,7 +261,7 @@ for batch_size in ${batch_size_list[*]}; do
            echo $cmd
            eval $cmd
            last_status=${PIPESTATUS[0]}
-           status_check $last_status "${cmd}" "${status_log}"
+           status_check $last_status "${cmd}" "${status_log}" "${model_name}"
        fi
    done
done
test_tipc/test_inference_cpp.sh

@@ -63,7 +63,7 @@ function func_shitu_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done

@@ -87,7 +87,7 @@ function func_shitu_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done

@@ -125,7 +125,7 @@ function func_cls_cpp_inference(){
                command1="${_script} > ${_save_log_path} 2>&1"
                eval ${command1}
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command1}" "${status_log}"
+               status_check $last_status "${command1}" "${status_log}" "${model_name}"
            done
        done
    done

@@ -148,7 +148,7 @@ function func_cls_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done
test_tipc/test_inference_jeston.sh

@@ -71,7 +71,7 @@ if [ ${MODE} = "whole_infer" ]; then
        echo $export_cmd
        eval $export_cmd
        status_export=$?
-       status_check $status_export "${export_cmd}" "${status_log}"
+       status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
    else
        save_infer_dir=${infer_model}
    fi
test_tipc/test_lite_arm_cpu_cpp.sh

@@ -67,7 +67,7 @@ function func_test_tipc(){
                eval ${command1}
                command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
                eval ${command2}
-               status_check $? "${command2}" "${status_log}"
+               status_check $? "${command2}" "${status_log}" "${model_name}"
            done
        done
    done
test_tipc/test_paddle2onnx.sh

@@ -55,7 +55,7 @@ function func_paddle2onnx(){
    trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
    eval $trans_model_cmd
    last_status=${PIPESTATUS[0]}
-   status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
+   status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}" "${model_name}"

    # python inference
    set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")

@@ -64,7 +64,7 @@ function func_paddle2onnx(){
    set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
    infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
    eval $infer_model_cmd
-   status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
+   status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}" "${model_name}"
}
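Note: as rendered in these two hunks, the calls in test_tipc/test_paddle2onnx.sh already carried "${model_name}" before the change, so after this commit it is passed twice. A helper that reads only its first four positional parameters (as in the sketch above) simply ignores the extra fifth argument.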
test_tipc/test_serving_infer.sh

@@ -88,7 +88,7 @@ function func_serving_cls(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        else

@@ -98,7 +98,7 @@ function func_serving_cls(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        fi

@@ -124,7 +124,7 @@ function func_serving_cls(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9

@@ -156,7 +156,7 @@ function func_serving_cls(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9

@@ -250,7 +250,7 @@ function func_serving_rec(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        else

@@ -260,7 +260,7 @@ function func_serving_rec(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        fi

@@ -286,7 +286,7 @@ function func_serving_rec(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9

@@ -318,7 +318,7 @@ function func_serving_rec(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 10s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
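Each serving hunk above ends with the same teardown line; restated with comments (no behavior change), it works like this:

# The teardown pipeline used throughout test_serving_infer.sh, annotated:
#   ps ux                            list all running processes
#   grep -E 'web_service|pipeline'   keep the serving / pipeline workers started earlier
#   awk '{print $2}'                 extract the PID column
#   xargs kill -s 9                  force-kill them before the next configuration runs
ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9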
test_tipc/test_train_inference_python.sh

@@ -126,7 +126,7 @@ function func_inference(){
                    eval $command
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                   status_check $last_status "${command}" "../${status_log}"
+                   status_check $last_status "${command}" "../${status_log}" "${model_name}"
                done
            done
        done

@@ -151,7 +151,7 @@ function func_inference(){
                    eval $command
                    last_status=${PIPESTATUS[0]}
                    eval "cat ${_save_log_path}"
-                   status_check $last_status "${command}" "../${status_log}"
+                   status_check $last_status "${command}" "../${status_log}" "${model_name}"
                done
            done
        done

@@ -198,7 +198,7 @@ elif [[ ${MODE} = "klquant_whole_infer" ]]; then
        command="${python} ${kl_quant_cmd_value}"
        eval $command
        last_status=${PIPESTATUS[0]}
-       status_check $last_status "${command}" "${status_log}"
+       status_check $last_status "${command}" "${status_log}" "${model_name}"
        cd inference/quant_post_static_model
        ln -s __model__ inference.pdmodel
        ln -s __params__ inference.pdiparams

@@ -301,7 +301,7 @@ else
        # export FLAGS_cudnn_deterministic=True
        sleep 5
        eval $cmd
-       status_check $? "${cmd}" "${status_log}"
+       status_check $? "${cmd}" "${status_log}" "${model_name}"
        sleep 5
        if [[ $FILENAME == *GeneralRecognition* ]]; then

@@ -318,7 +318,7 @@ else
            set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
            eval $eval_cmd
-           status_check $? "${eval_cmd}" "${status_log}"
+           status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
            sleep 5
        fi
        # run export model

@@ -333,7 +333,7 @@ else
        set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
        export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
        eval $export_cmd
-       status_check $? "${export_cmd}" "${status_log}"
+       status_check $? "${export_cmd}" "${status_log}" "${model_name}"
        #run inference
        eval $env
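Taken together, a call site after this commit follows the pattern sketched below. The variable values are hypothetical, chosen only to show what ends up in the status log; the actual message format depends on the status_check implementation sketched earlier.

# Hypothetical values for illustration; not taken from the diff.
model_name="MobileNetV3_large_x1_0"
status_log="test_tipc/output/results_python.log"
cmd="python3.7 tools/train.py -c ppcls/configs/ImageNet/MobileNetV3/MobileNetV3_large_x1_0.yaml"
eval $cmd
status_check $? "${cmd}" "${status_log}" "${model_name}"
# With the sketched helper, a success would append a line such as:
#   Run successfully with command - MobileNetV3_large_x1_0 - python3.7 tools/train.py ...!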