Unverified commit 8a6acfbd, authored by Wei Shengyu, committed by GitHub

Merge pull request #2006 from HydrogenSulfate/fix_paddle2onnx_tipc

Fix paddle2onnx tipc
@@ -225,7 +225,7 @@ for batch_size in ${batch_size_list[*]}; do
            echo $cmd
            eval $cmd
            last_status=${PIPESTATUS[0]}
-           status_check $last_status "${cmd}" "${status_log}"
+           status_check $last_status "${cmd}" "${status_log}" "${model_name}"
        else
            IFS=";"
            unset_env=`unset CUDA_VISIBLE_DEVICES`
@@ -261,7 +261,7 @@ for batch_size in ${batch_size_list[*]}; do
            echo $cmd
            eval $cmd
            last_status=${PIPESTATUS[0]}
-           status_check $last_status "${cmd}" "${status_log}"
+           status_check $last_status "${cmd}" "${status_log}" "${model_name}"
        fi
    done
done
...
@@ -38,6 +38,7 @@ function func_set_params(){
 function func_parser_params(){
    strs=$1
+   MODE=$2
    IFS=":"
    array=(${strs})
    key=${array[0]}
@@ -64,10 +65,10 @@ function status_check(){
    last_status=$1   # the exit code
    run_command=$2
    run_log=$3
+   model_name=$4
    if [ $last_status -eq 0 ]; then
-       echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
+       echo -e "\033[33m Run successfully with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    else
-       echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
+       echo -e "\033[33m Run failed with command - ${model_name} - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
 }
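
Every call site now passes a fourth argument so the TIPC result log records which model produced each line. A minimal usage sketch of the updated signature (the model name, command, and log path are illustrative, not taken from this diff):

source test_tipc/common_func.sh
model_name="ResNet50_vd"                            # illustrative
status_log="./test_tipc/output/${model_name}/results.log"
mkdir -p "$(dirname ${status_log})"                 # status_check appends via `tee -a`
cmd="python3.7 --version"
eval $cmd
status_check $? "${cmd}" "${status_log}" "${model_name}"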
@@ -6,7 +6,7 @@ python:python3.7
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
 --save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
---opset_version:10
+--opset_version:11
 --enable_onnx_checker:True
 inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
 inference:./python/predict_cls.py
...
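
test_paddle2onnx.sh joins these key:value pairs into one conversion command; for the mainbody detector above, the assembled call is roughly the following (reconstructed from the config fields for illustration, not copied from the diff):

paddle2onnx --model_dir ./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \
    --model_filename inference.pdmodel \
    --params_filename inference.pdiparams \
    --save_file ./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx \
    --opset_version 11 \
    --enable_onnx_checker True

The bump from opset 10 to 11 presumably works around detector operators that do not export cleanly at opset 10; note the classification configs below keep opset 10.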
@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPHGNet_small_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_small_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPHGNet_small_infer
...
@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPHGNet_tiny_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPHGNet_tiny_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPHGNet_tiny_infer
...
@@ -8,6 +8,7 @@ python:python3.7
 --save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
 Global.inference_model_dir:./models/PPLCNet_x0_25_infer
...
 ===========================paddle2onnx_params===========================
-model_name:PPLCNet_x0_25
+model_name:PPLCNet_x0_35
 python:python3.7
 2onnx: paddle2onnx
---model_dir:./deploy/models/PPLCNet_x0_25_infer/
+--model_dir:./deploy/models/PPLCNet_x0_35_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---save_file:./deploy/models/PPLCNet_x0_25_infer/inference.onnx
+--save_file:./deploy/models/PPLCNet_x0_35_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
-inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_25_infer.tar
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_35_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
-Global.inference_model_dir:./models/PPLCNet_x0_25_infer
+Global.inference_model_dir:./models/PPLCNet_x0_35_infer
 Global.use_gpu:False
 -c:configs/inference_cls.yaml
\ No newline at end of file
 ===========================paddle2onnx_params===========================
-model_name:PP-ShiTu_mainbody_det
+model_name:PPLCNet_x0_5
 python:python3.7
 2onnx: paddle2onnx
---model_dir:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/
+--model_dir:./deploy/models/PPLCNet_x0_5_infer/
 --model_filename:inference.pdmodel
 --params_filename:inference.pdiparams
---save_file:./deploy/models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/inference.onnx
+--save_file:./deploy/models/PPLCNet_x0_5_infer/inference.onnx
 --opset_version:10
 --enable_onnx_checker:True
-inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/PPLCNet_x0_5_infer.tar
 inference:./python/predict_cls.py
 Global.use_onnx:True
-Global.inference_model_dir:./models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer
+Global.inference_model_dir:./models/PPLCNet_x0_5_infer
 Global.use_gpu:False
 -c:configs/inference_cls.yaml
\ No newline at end of file
...@@ -9,8 +9,8 @@ python:python3.7 ...@@ -9,8 +9,8 @@ python:python3.7
--opset_version:10 --opset_version:10
--enable_onnx_checker:True --enable_onnx_checker:True
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar
inference: python/predict_cls.py -c configs/inference_cls.yaml inference:./python/predict_cls.py
Global.use_onnx:True Global.use_onnx:True
Global.inference_model_dir:models/ResNet50_vd_infer/ Global.inference_model_dir:./models/ResNet50_vd_infer/
Global.use_gpu:False Global.use_gpu:False
-c:configs/inference_cls.yaml -c:configs/inference_cls.yaml
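
With the keys normalized, func_paddle2onnx assembles the ONNX runtime check for this model into roughly the following (reconstructed for illustration from the infer_model_cmd template further down, not copied from the diff):

cd deploy && python3.7 ./python/predict_cls.py -c configs/inference_cls.yaml \
    -o Global.use_onnx=True \
    -o Global.inference_model_dir=./models/ResNet50_vd_infer/ \
    -o Global.use_gpu=False && cd ../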
@@ -200,7 +200,7 @@ fi
 if [[ ${MODE} = "serving_infer" ]]; then
    # prepare serving env
    python_name=$(func_parser_value "${lines[2]}")
-   ${python_name} -m pip install install paddle-serving-server-gpu==0.7.0.post102
+   ${python_name} -m pip install paddle-serving-server-gpu==0.7.0.post102
    ${python_name} -m pip install paddle_serving_client==0.7.0
    ${python_name} -m pip install paddle-serving-app==0.7.0
    if [[ ${model_name} =~ "ShiTu" ]]; then
@@ -231,7 +231,7 @@ if [[ ${MODE} = "paddle2onnx_infer" ]]; then
    inference_model_url=$(func_parser_value "${lines[10]}")
    tar_name=${inference_model_url##*/}
-   ${python_name} -m pip install install paddle2onnx
+   ${python_name} -m pip install paddle2onnx
    ${python_name} -m pip install onnxruntime
    cd deploy
    mkdir models
...
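
The collapsed lines after `mkdir models` consume inference_model_url and tar_name; a minimal sketch of that download-and-unpack step, assuming the wget/tar pattern used elsewhere in prepare.sh:

cd models
wget -nc ${inference_model_url}   # tar_name is the URL basename, e.g. PPHGNet_small_infer.tar
tar xf ${tar_name}
cd ../../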
@@ -63,7 +63,7 @@ function func_shitu_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done
@@ -87,7 +87,7 @@ function func_shitu_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done
@@ -125,7 +125,7 @@ function func_cls_cpp_inference(){
                command1="${_script} > ${_save_log_path} 2>&1"
                eval ${command1}
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command1}" "${status_log}"
+               status_check $last_status "${command1}" "${status_log}" "${model_name}"
            done
        done
    done
@@ -148,7 +148,7 @@ function func_cls_cpp_inference(){
                command="${_script} > ${_save_log_path} 2>&1"
                eval $command
                last_status=${PIPESTATUS[0]}
-               status_check $last_status "${command}" "${status_log}"
+               status_check $last_status "${command}" "${status_log}" "${model_name}"
            done
        done
    done
...
@@ -71,7 +71,7 @@ if [ ${MODE} = "whole_infer" ]; then
        echo $export_cmd
        eval $export_cmd
        status_export=$?
-       status_check $status_export "${export_cmd}" "${status_log}"
+       status_check $status_export "${export_cmd}" "${status_log}" "${model_name}"
    else
        save_infer_dir=${infer_model}
    fi
...
@@ -67,7 +67,7 @@ function func_test_tipc(){
            eval ${command1}
            command2="adb shell 'export LD_LIBRARY_PATH=${lite_arm_work_path}; ${real_inference_cmd}' > ${_save_log_path} 2>&1"
            eval ${command2}
-           status_check $? "${command2}" "${status_log}"
+           status_check $? "${command2}" "${status_log}" "${model_name}"
        done
    done
done
...
...@@ -3,13 +3,6 @@ source test_tipc/common_func.sh ...@@ -3,13 +3,6 @@ source test_tipc/common_func.sh
FILENAME=$1 FILENAME=$1
dataline=$(cat ${FILENAME})
lines=(${dataline})
# common params
model_name=$(func_parser_value "${lines[1]}")
python=$(func_parser_value "${lines[2]}")
# parser params # parser params
dataline=$(awk 'NR==1, NR==16{print}' $FILENAME) dataline=$(awk 'NR==1, NR==16{print}' $FILENAME)
IFS=$'\n' IFS=$'\n'
...@@ -43,7 +36,7 @@ inference_config_key=$(func_parser_key "${lines[15]}") ...@@ -43,7 +36,7 @@ inference_config_key=$(func_parser_key "${lines[15]}")
inference_config_value=$(func_parser_value "${lines[15]}") inference_config_value=$(func_parser_value "${lines[15]}")
LOG_PATH="./test_tipc/output/${model_name}" LOG_PATH="./test_tipc/output/${model_name}"
mkdir -p ./test_tipc/output mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results_paddle2onnx.log" status_log="${LOG_PATH}/results_paddle2onnx.log"
...@@ -62,7 +55,8 @@ function func_paddle2onnx(){ ...@@ -62,7 +55,8 @@ function func_paddle2onnx(){
trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}" trans_model_cmd="${padlle2onnx_cmd} ${set_dirname} ${set_model_filename} ${set_params_filename} ${set_save_model} ${set_opset_version} ${set_enable_onnx_checker}"
eval $trans_model_cmd eval $trans_model_cmd
last_status=${PIPESTATUS[0]} last_status=${PIPESTATUS[0]}
status_check $last_status "${trans_model_cmd}" "${status_log}" status_check $last_status "${trans_model_cmd}" "${status_log}" "${model_name}"
# python inference # python inference
set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}") set_model_dir=$(func_set_params "${inference_model_dir_key}" "${inference_model_dir_value}")
set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}") set_use_onnx=$(func_set_params "${use_onnx_key}" "${use_onnx_value}")
...@@ -70,7 +64,7 @@ function func_paddle2onnx(){ ...@@ -70,7 +64,7 @@ function func_paddle2onnx(){
set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}") set_inference_config=$(func_set_params "${inference_config_key}" "${inference_config_value}")
infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../" infer_model_cmd="cd deploy && ${python} ${inference_py} -o ${set_model_dir} -o ${set_use_onnx} -o ${set_hardware} ${set_inference_config} > ${_save_log_path} 2>&1 && cd ../"
eval $infer_model_cmd eval $infer_model_cmd
status_check $last_status "${infer_model_cmd}" "${status_log}" status_check $last_status "${infer_model_cmd}" "${status_log}" "${model_name}"
} }
......
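
After these fixes the paddle2onnx chain runs end to end and logs per-model results under test_tipc/output/${model_name}/results_paddle2onnx.log. A typical invocation looks like the following (the config path is illustrative; substitute the file matching your model):

bash test_tipc/prepare.sh test_tipc/config/ResNet/ResNet50_vd_paddle2onnx_infer_python.txt paddle2onnx_infer
bash test_tipc/test_paddle2onnx.sh test_tipc/config/ResNet/ResNet50_vd_paddle2onnx_infer_python.txt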
@@ -88,7 +88,7 @@ function func_serving_cls(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        else
@@ -98,7 +98,7 @@ function func_serving_cls(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        fi
@@ -124,7 +124,7 @@ function func_serving_cls(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -156,7 +156,7 @@ function func_serving_cls(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -250,7 +250,7 @@ function func_serving_rec(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        else
@@ -260,7 +260,7 @@ function func_serving_rec(){
            _save_log_path="${LOG_PATH}/server_infer_cpp_cpu_pipeline_usemkldnn_False_threads_4_batchsize_1.log"
            pipeline_cmd="${python} ocr_cpp_client.py ppocr_det_mobile_2.0_client/ ppocr_rec_mobile_2.0_client/"
            eval $pipeline_cmd
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
            ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
        fi
@@ -286,7 +286,7 @@ function func_serving_rec(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 5s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
@@ -318,7 +318,7 @@ function func_serving_rec(){
            eval $pipeline_cmd
            last_status=${PIPESTATUS[0]}
            eval "cat ${_save_log_path}"
-           status_check $last_status "${pipeline_cmd}" "${status_log}"
+           status_check $last_status "${pipeline_cmd}" "${status_log}" "${model_name}"
            sleep 10s
        done
        ps ux | grep -E 'web_service|pipeline' | awk '{print $2}' | xargs kill -s 9
...
@@ -126,7 +126,7 @@ function func_inference(){
                eval $command
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${command}" "../${status_log}"
+               status_check $last_status "${command}" "../${status_log}" "${model_name}"
            done
        done
    done
@@ -151,7 +151,7 @@ function func_inference(){
                eval $command
                last_status=${PIPESTATUS[0]}
                eval "cat ${_save_log_path}"
-               status_check $last_status "${command}" "../${status_log}"
+               status_check $last_status "${command}" "../${status_log}" "${model_name}"
            done
        done
    done
@@ -198,7 +198,7 @@ elif [[ ${MODE} = "klquant_whole_infer" ]]; then
    command="${python} ${kl_quant_cmd_value}"
    eval $command
    last_status=${PIPESTATUS[0]}
-   status_check $last_status "${command}" "${status_log}"
+   status_check $last_status "${command}" "${status_log}" "${model_name}"
    cd inference/quant_post_static_model
    ln -s __model__ inference.pdmodel
    ln -s __params__ inference.pdiparams
@@ -301,7 +301,7 @@ else
        # export FLAGS_cudnn_deterministic=True
        sleep 5
        eval $cmd
-       status_check $? "${cmd}" "${status_log}"
+       status_check $? "${cmd}" "${status_log}" "${model_name}"
        sleep 5
        if [[ $FILENAME == *GeneralRecognition* ]]; then
@@ -318,7 +318,7 @@ else
            set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}")
            eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}"
            eval $eval_cmd
-           status_check $? "${eval_cmd}" "${status_log}"
+           status_check $? "${eval_cmd}" "${status_log}" "${model_name}"
            sleep 5
        fi
        # run export model
@@ -333,7 +333,7 @@ else
        set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}")
        export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}"
        eval $export_cmd
-       status_check $? "${export_cmd}" "${status_log}"
+       status_check $? "${export_cmd}" "${status_log}" "${model_name}"
        #run inference
        eval $env
...