diff --git a/deploy/lite/ocr_db_crnn.cc b/deploy/lite/ocr_db_crnn.cc
index 011d4adbeb65732f12c263ddfec94afb84bf5969..1ffbbacb74545b0bbea4957e25b6235225bad02b 100644
--- a/deploy/lite/ocr_db_crnn.cc
+++ b/deploy/lite/ocr_db_crnn.cc
@@ -172,7 +172,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
   cv::Mat resize_img;
   int index = 0;
+
+  std::vector<double> time_info = {0, 0, 0};
   for (int i = boxes.size() - 1; i >= 0; i--) {
+    auto preprocess_start = std::chrono::steady_clock::now();
     crop_img = GetRotateCropImage(srcimg, boxes[i]);
     if (use_direction_classify >= 1) {
       crop_img = RunClsModel(crop_img, predictor_cls);
@@ -191,7 +194,9 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
     auto *data0 = input_tensor0->mutable_data<float>();
     NeonMeanScale(dimg, data0, resize_img.rows * resize_img.cols, mean, scale);
+    auto preprocess_end = std::chrono::steady_clock::now();
     //// Run CRNN predictor
+    auto inference_start = std::chrono::steady_clock::now();
     predictor_crnn->Run();
     // Get output and run postprocess
@@ -199,8 +204,10 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
         std::move(predictor_crnn->GetOutput(0)));
     auto *predict_batch = output_tensor0->data<float>();
     auto predict_shape = output_tensor0->shape();
+    auto inference_end = std::chrono::steady_clock::now();
     // ctc decode
+    auto postprocess_start = std::chrono::steady_clock::now();
     std::string str_res;
     int argmax_idx;
     int last_index = 0;
@@ -224,7 +231,20 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
     score /= count;
     rec_text.push_back(str_res);
     rec_text_score.push_back(score);
+    auto postprocess_end = std::chrono::steady_clock::now();
+
+    std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
+    time_info[0] += double(preprocess_diff.count() * 1000);
+    std::chrono::duration<float> inference_diff = inference_end - inference_start;
+    time_info[1] += double(inference_diff.count() * 1000);
+    std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
+    time_info[2] += double(postprocess_diff.count() * 1000);
+  }
+
+times->push_back(time_info[0]);
+times->push_back(time_info[1]);
+times->push_back(time_info[2]);
 }
 
 std::vector<std::vector<std::vector<int>>>
@@ -312,7 +332,6 @@ std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, int num_threa
   config.set_model_from_file(model_file);
   config.set_threads(num_threads);
-
   std::shared_ptr<PaddlePredictor> predictor = CreatePaddlePredictor<MobileConfig>(config);
   return predictor;
@@ -434,6 +453,9 @@ void system(char **argv){
   auto rec_predictor = loadModel(rec_model_file, std::stoi(num_threads));
   auto cls_predictor = loadModel(cls_model_file, std::stoi(num_threads));
+  std::vector<double> det_time_info = {0, 0, 0};
+  std::vector<double> rec_time_info = {0, 0, 0};
+
   for (int i = 0; i < cv_all_img_names.size(); ++i) {
     std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
     cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
@@ -459,8 +481,38 @@ void system(char **argv){
     //// print recognized text
     for (int i = 0; i < rec_text.size(); i++) {
       std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
-                << std::endl;
+                << std::endl;
+    }
+
+    det_time_info[0] += det_times[0];
+    det_time_info[1] += det_times[1];
+    det_time_info[2] += det_times[2];
+    rec_time_info[0] += rec_times[0];
+    rec_time_info[1] += rec_times[1];
+    rec_time_info[2] += rec_times[2];
+  }
+  if (strcmp(argv[12], "True") == 0) {
+    AutoLogger autolog_det(det_model_file,
+                           runtime_device,
+                           std::stoi(num_threads),
+                           std::stoi(batchsize),
+                           "dynamic",
+                           precision,
+                           det_time_info,
+                           cv_all_img_names.size());
+    AutoLogger autolog_rec(rec_model_file,
+                           runtime_device,
+                           std::stoi(num_threads),
+                           std::stoi(batchsize),
+                           "dynamic",
+                           precision,
+                           rec_time_info,
+                           cv_all_img_names.size());
+
+    autolog_det.report();
+    std::cout << std::endl;
+    autolog_rec.report();
+  }
 }
 
@@ -503,15 +555,15 @@ void det(int argc, char **argv) {
   auto img_vis = Visualization(srcimg, boxes);
   std::cout << boxes.size() << " bboxes have detected:" << std::endl;
-  // for (int i=0; i<boxes.size(); i++){
+Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
+Run successfully with command xxx
+...
+```
+
+On failure, the output looks like:
+
+```
+Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
+Run failed with command xxx
+...
+```
+
+Under the ./output/ folder, the following logs are produced; each one contains the results for a different configuration:
+
+
+
+In each log, autolog is called to print information like the following:
+
+
+
+
+
+## 3. More tutorials
+
+This document is for functional testing only. For a more detailed tutorial on Lite-side inference, please refer to: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
diff --git a/test_tipc/docs/test_lite_arm_cpu_cpp.md b/test_tipc/docs/test_lite_arm_cpu_cpp.md
deleted file mode 100644
index 6f58026a315dabb8810e56b6d694733c1c72019c..0000000000000000000000000000000000000000
--- a/test_tipc/docs/test_lite_arm_cpu_cpp.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Lite\_arm\_cpu\_cpp prediction functional test
-
-The main program for the Lite\_arm\_cpu\_cpp prediction functional test is `test_lite_arm_cpu_cpp.sh`, which tests a model's C++ inference on ARM CPU with the Lite inference library.
-
-## 1. Summary of supported settings
-
-The Lite-side samples currently support the following combinations:
-
-**Field descriptions:**
-- Model type: normal model (FP32) and quantized model (INT8)
-- batch-size: 1 and 4
-- threads: 1 and 4
-- Number of predictors: multi-predictor and single-predictor inference
-- Inference library source: downloaded or compiled from source
-
-| Model type | batch-size | threads | Number of predictors | Inference library source |
-| :----: | :----: | :----: | :----: | :----: |
-| Normal model / quantized model | 1 | 1/4 | 1 | Download |
-
-
-## 2. Test workflow
-To set up the runtime environment, please refer to the [documentation](./install.md) and configure the TIPC environment.
-
-### 2.1 Functional test
-
-First run `prepare_lite.sh`; it generates `test_lite.tar` in the current directory, which contains the test data, the test models, and the executable used for inference. Upload `test_lite.tar` to the phone under test, extract it in the phone's terminal, enter the `test_lite` directory, and then run `test_lite_arm_cpu_cpp.sh` to start the test. Log files with the `lite_*.log` suffix are finally generated in the `test_lite/output` directory.
-
-```shell
-
-# prepare data and models
-bash test_tipc/prepare_lite.sh ./test_tipc/configs/ppocr_det_mobile/model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
-
-# run the test on the phone:
-bash test_lite_arm_cpu_cpp.sh model_linux_gpu_normal_normal_lite_cpp_arm_cpu.txt
-
-```
-
-**Note**: Running this project needs commands such as bash, which are hard to set up with the traditional adb approach. It is therefore recommended to connect to the computer by opening a virtual terminal on the phone; see [Connecting an Android phone with termux](./termux_for_android.md).
-
-#### Results
-
-The result of each test is written to `./output/`:
-On success, the output looks like:
-
-```
-Run successfully with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
-Run successfully with command xxx
-...
-```
-
-On failure, the output looks like:
-
-```
-Run failed with command - ./ocr_db_crnn det ch_PP-OCRv2_det_infer_opt.nb ARM_CPU FP32 1 1 ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True > ./output/lite_ch_PP-OCRv2_det_infer_opt.nb_runtime_device_ARM_CPU_precision_FP32_batchsize_1_threads_1.log 2>&1!
-Run failed with command xxx
-...
-```
-
-Under the ./output/ folder, the following logs are produced; each one contains the results for a different configuration:
-
-
-
-In each log, autolog is called to print information like the following:
-
-
-
-
-
-## 3. More tutorials
-
-This document is for functional testing only. For a more detailed tutorial on Lite-side inference, please refer to: [Lite deployment](https://github.com/PaddlePaddle/PaddleOCR/blob/develop/deploy/lite/readme.md).
diff --git a/test_tipc/prepare_lite.sh b/test_tipc/prepare_lite_cpp.sh
similarity index 50%
rename from test_tipc/prepare_lite.sh
rename to test_tipc/prepare_lite_cpp.sh
index 6a08d96298592c829547df9fa30ef4149ddc5b00..b129322ddde36e496c8bc1b08f7b78e94e27288f 100644
--- a/test_tipc/prepare_lite.sh
+++ b/test_tipc/prepare_lite_cpp.sh
@@ -6,22 +6,59 @@ dataline=$(cat ${FILENAME})
 IFS=$'\n'
 lines=(${dataline})
 IFS=$'\n'
-lite_model_list=$(func_parser_value "${lines[2]}")
+
+inference_cmd=$(func_parser_value "${lines[1]}")
+DEVICE=$(func_parser_value "${lines[2]}")
+det_lite_model_list=$(func_parser_value "${lines[3]}")
+rec_lite_model_list=$(func_parser_value "${lines[4]}")
+cls_lite_model_list=$(func_parser_value "${lines[5]}")
+
+if [[ $inference_cmd =~ "det" ]];then
+    lite_model_list=${det_lite_model_list}
+elif [[ $inference_cmd =~ "rec" ]];then
+    lite_model_list=(${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
+elif [[ $inference_cmd =~ "system" ]];then
+    lite_model_list=(${det_lite_model_list[*]} ${rec_lite_model_list[*]} ${cls_lite_model_list[*]})
+else
+    echo "inference_cmd is wrong, please check."
+    exit 1
+fi
+
+if [ ${DEVICE} = "ARM_CPU" ];then
+    valid_targets="arm"
+    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz"
+    end_index="66"
+elif [ ${DEVICE} = "ARM_GPU_OPENCL" ];then
+    valid_targets="opencl"
+    paddlelite_url="https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.10-rc/inference_lite_lib.armv8.clang.with_exception.with_extra.with_cv.opencl.tar.gz"
+    end_index="71"
+else
+    echo "DEVICE only support ARM_CPU, ARM_GPU_OPENCL."
+    exit 2
+fi
 
 # prepare lite .nb model
-pip install paddlelite==2.9
+pip install paddlelite==2.10-rc
 current_dir=${PWD}
 IFS="|"
 model_path=./inference_models
+
 for model in ${lite_model_list[*]}; do
-    inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
+    if [[ $model =~ "PP-OCRv2" ]];then
+        inference_model_url=https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/${model}.tar
+    elif [[ $model =~ "v2.0" ]];then
+        inference_model_url=https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/${model}.tar
+    else
+        echo "Model is wrong, please check."
+        exit 3
+    fi
     inference_model=${inference_model_url##*/}
     wget -nc -P ${model_path} ${inference_model_url}
     cd ${model_path} && tar -xf ${inference_model} && cd ../
     model_dir=${model_path}/${inference_model%.*}
     model_file=${model_dir}/inference.pdmodel
     param_file=${model_dir}/inference.pdiparams
-    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=arm --optimize_out=${model_dir}_opt
+    paddle_lite_opt --model_dir=${model_dir} --model_file=${model_file} --param_file=${param_file} --valid_targets=${valid_targets} --optimize_out=${model_dir}_opt
 done
 
 # prepare test data
@@ -35,18 +72,19 @@ cd ./inference_models && tar -xf ${inference_model} && cd ../
 cd ./test_data && tar -xf ${data_file} && rm ${data_file} && cd ../
 
 # prepare lite env
-paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
 paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
-paddlelite_file=${paddlelite_zipfile:0:66}
+paddlelite_file=${paddlelite_zipfile:0:${end_index}}
 wget ${paddlelite_url} && tar -xf ${paddlelite_zipfile}
 mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
 cp -r ${model_path}/*_opt.nb test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
 cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
 cp -r ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
 cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
-cp ${FILENAME} test_tipc/test_lite_arm_cpu_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
+cp ${FILENAME} test_tipc/test_lite_arm_cpp.sh test_tipc/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
 cd ${paddlelite_file}/demo/cxx/ocr/
 git clone https://github.com/cuicheng01/AutoLog.git
+
+# make
 make -j
 sleep 1
 make -j
diff --git a/test_tipc/readme.md b/test_tipc/readme.md
index 31635bc2a56f8c7b6ee6e05b2d9e061ea3585e00..d953deeb3d7f6f372c7943180f81dabff86bceff 100644
--- a/test_tipc/readme.md
+++ b/test_tipc/readme.md
@@ -83,10 +83,11 @@ test_tipc/
 ├── cpp_ppocr_det_mobile_results_fp16.txt    # pre-saved FP16 results of C++ inference for the mobile ppocr detection model
 ├── ...
 ├── prepare.sh                        # downloads the data and models required to run test_*.sh
+├── prepare_lite_cpp.sh               # prepares the data, models and executable required to run test_*.sh on the phone
 ├── test_train_inference_python.sh    # main test program for python training and inference
 ├── test_inference_cpp.sh             # main test program for C++ inference
 ├── test_serving.sh                   # main test program for serving deployment inference
-├── test_lite_arm_cpu_cpp.sh          # main test program for C++ inference deployed with Lite on arm_cpu
+├── test_lite_arm_cpp.sh              # main test program for C++ inference deployed with Lite on ARM
 ├── compare_results.py                # checks whether the accuracy error between the predictions in the logs and the pre-saved results stays within the allowed range
 └── readme.md                         # usage documentation
 ```
@@ -125,5 +126,5 @@ test_tipc/
 [test_train_inference_python usage](docs/test_train_inference_python.md)
 [test_inference_cpp usage](docs/test_inference_cpp.md)
 [test_serving usage](docs/test_serving.md)
-[test_lite_arm_cpu_cpp usage](docs/test_lite_arm_cpu_cpp.md)
+[test_lite_arm_cpp usage](docs/test_lite_arm_cpp.md)
 [test_paddle2onnx usage](docs/test_paddle2onnx.md)
diff --git a/test_tipc/test_lite_arm_cpp.sh b/test_tipc/test_lite_arm_cpp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c071a236bb7ea35b86b32bfc3b22e87a5aabbb93
--- /dev/null
+++ b/test_tipc/test_lite_arm_cpp.sh
@@ -0,0 +1,159 @@
+#!/bin/bash
+source ./common_func.sh
+export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
+
+FILENAME=$1
+dataline=$(cat $FILENAME)
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+
+# parser lite inference
+inference_cmd=$(func_parser_value "${lines[1]}")
+runtime_device=$(func_parser_value "${lines[2]}")
+det_model_list=$(func_parser_value "${lines[3]}")
+rec_model_list=$(func_parser_value "${lines[4]}")
+cls_model_list=$(func_parser_value "${lines[5]}")
+cpu_threads_list=$(func_parser_value "${lines[6]}")
+det_batch_size_list=$(func_parser_value "${lines[7]}")
+rec_batch_size_list=$(func_parser_value "${lines[8]}")
+infer_img_dir_list=$(func_parser_value "${lines[9]}")
+config_dir=$(func_parser_value "${lines[10]}")
+rec_dict_dir=$(func_parser_value "${lines[11]}")
+benchmark_value=$(func_parser_value "${lines[12]}")
+
+if [[ $inference_cmd =~ "det" ]]; then
+    lite_model_list=${det_model_list}
+elif [[ $inference_cmd =~ "rec" ]]; then
+    lite_model_list=(${rec_model_list[*]} ${cls_model_list[*]})
+elif [[ $inference_cmd =~ "system" ]]; then
+    lite_model_list=(${det_model_list[*]} ${rec_model_list[*]} ${cls_model_list[*]})
+else
+    echo "inference_cmd is wrong, please check."
+    exit 1
+fi
+
+LOG_PATH="./output"
+mkdir -p ${LOG_PATH}
+status_log="${LOG_PATH}/results.log"
+
+
+function func_test_det(){
+    IFS='|'
+    _script=$1
+    _det_model=$2
+    _log_path=$3
+    _img_dir=$4
+    _config=$5
+    if [[ $_det_model =~ "slim" ]]; then
+        precision="INT8"
+    else
+        precision="FP32"
+    fi
+
+    # lite inference
+    for num_threads in ${cpu_threads_list[*]}; do
+        for det_batchsize in ${det_batch_size_list[*]}; do
+            _save_log_path="${_log_path}/lite_${_det_model}_runtime_device_${runtime_device}_precision_${precision}_det_batchsize_${det_batchsize}_threads_${num_threads}.log"
+            command="${_script} ${_det_model} ${runtime_device} ${precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${benchmark_value} > ${_save_log_path} 2>&1"
+            eval ${command}
+            status_check $? "${command}" "${status_log}"
"${command}" "${status_log}" + done + done +} + +function func_test_rec(){ + IFS='|' + _script=$1 + _rec_model=$2 + _cls_model=$3 + _log_path=$4 + _img_dir=$5 + _config=$6 + _rec_dict_dir=$7 + + if [[ $_det_model =~ "slim" ]]; then + _precision="INT8" + else + _precision="FP32" + fi + + # lite inference + for num_threads in ${cpu_threads_list[*]}; do + for rec_batchsize in ${rec_batch_size_list[*]}; do + _save_log_path="${_log_path}/lite_${_rec_model}_${cls_model}_runtime_device_${runtime_device}_precision_${_precision}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log" + command="${_script} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${rec_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1" + eval ${command} + status_check $? "${command}" "${status_log}" + done + done +} + +function func_test_system(){ + IFS='|' + _script=$1 + _det_model=$2 + _rec_model=$3 + _cls_model=$4 + _log_path=$5 + _img_dir=$6 + _config=$7 + _rec_dict_dir=$8 + if [[ $_det_model =~ "slim" ]]; then + _precision="INT8" + else + _precision="FP32" + fi + + # lite inference + for num_threads in ${cpu_threads_list[*]}; do + for det_batchsize in ${det_batch_size_list[*]}; do + for rec_batchsize in ${rec_batch_size_list[*]}; do + _save_log_path="${_log_path}/lite_${_det_model}_${_rec_model}_${_cls_model}_runtime_device_${runtime_device}_precision_${_precision}_det_batchsize_${det_batchsize}_rec_batchsize_${rec_batchsize}_threads_${num_threads}.log" + command="${_script} ${_det_model} ${_rec_model} ${_cls_model} ${runtime_device} ${_precision} ${num_threads} ${det_batchsize} ${_img_dir} ${_config} ${_rec_dict_dir} ${benchmark_value} > ${_save_log_path} 2>&1" + eval ${command} + status_check $? "${command}" "${status_log}" + done + done + done +} + + +echo "################### run test ###################" + +if [[ $inference_cmd =~ "det" ]]; then + IFS="|" + det_model_list=(${det_model_list[*]}) + + for i in {0..1}; do + #run lite inference + for img_dir in ${infer_img_dir_list[*]}; do + func_test_det "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" + done + done + +elif [[ $inference_cmd =~ "rec" ]]; then + IFS="|" + rec_model_list=(${rec_model_list[*]}) + cls_model_list=(${cls_model_list[*]}) + + for i in {0..1}; do + #run lite inference + for img_dir in ${infer_img_dir_list[*]}; do + func_test_rec "${inference_cmd}" "${rec_model}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${rec_dict_dir}" "${config_dir}" + done + done + +elif [[ $inference_cmd =~ "system" ]]; then + IFS="|" + det_model_list=(${det_model_list[*]}) + rec_model_list=(${rec_model_list[*]}) + cls_model_list=(${cls_model_list[*]}) + + for i in {0..1}; do + #run lite inference + for img_dir in ${infer_img_dir_list[*]}; do + func_test_system "${inference_cmd}" "${det_model_list[i]}_opt.nb" "${rec_model_list[i]}_opt.nb" "${cls_model_list[i]}_opt.nb" "${LOG_PATH}" "${img_dir}" "${config_dir}" "${rec_dict_dir}" + done + done +fi diff --git a/test_tipc/test_lite_arm_cpu_cpp.sh b/test_tipc/test_lite_arm_cpu_cpp.sh deleted file mode 100644 index 04eebbd28a334f7ac7819f8ff55d7b3192f4b490..0000000000000000000000000000000000000000 --- a/test_tipc/test_lite_arm_cpu_cpp.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/bin/bash -source ./common_func.sh -export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH - -FILENAME=$1 -dataline=$(cat $FILENAME) -# parser params -IFS=$'\n' -lines=(${dataline}) - -# parser lite inference 
-lite_inference_cmd=$(func_parser_value "${lines[1]}")
-lite_model_dir_list=$(func_parser_value "${lines[2]}")
-runtime_device=$(func_parser_value "${lines[3]}")
-lite_cpu_threads_list=$(func_parser_value "${lines[4]}")
-lite_batch_size_list=$(func_parser_value "${lines[5]}")
-lite_infer_img_dir_list=$(func_parser_value "${lines[8]}")
-lite_config_dir=$(func_parser_value "${lines[9]}")
-lite_rec_dict_dir=$(func_parser_value "${lines[10]}")
-lite_benchmark_value=$(func_parser_value "${lines[11]}")
-
-
-LOG_PATH="./output"
-mkdir -p ${LOG_PATH}
-status_log="${LOG_PATH}/results.log"
-
-
-function func_lite(){
-    IFS='|'
-    _script=$1
-    _lite_model=$2
-    _log_path=$3
-    _img_dir=$4
-    _config=$5
-    if [[ $lite_model =~ "slim" ]]; then
-        precision="INT8"
-    else
-        precision="FP32"
-    fi
-
-    # lite inference
-    for num_threads in ${lite_cpu_threads_list[*]}; do
-        for batchsize in ${lite_batch_size_list[*]}; do
-            _save_log_path="${_log_path}/lite_${_lite_model}_runtime_device_${runtime_device}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}.log"
-            command="${_script} ${_lite_model} ${runtime_device} ${precision} ${num_threads} ${batchsize} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
-            eval ${command}
-            status_check $? "${command}" "${status_log}"
-        done
-    done
-}
-
-
-echo "################### run test ###################"
-IFS="|"
-for lite_model in ${lite_model_dir_list[*]}; do
-    #run lite inference
-    for img_dir in ${lite_infer_img_dir_list[*]}; do
-        func_lite "${lite_inference_cmd}" "${lite_model}_opt.nb" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
-    done
-done