Unverified commit e53b9dba, authored by MissPenguin, committed by GitHub

Merge pull request #4455 from cuicheng01/dygraph

Add fullchain lite demo
PTDN/configs/ppocr_det_mobile_params.txt:

@@ -98,3 +98,13 @@ null:null
 --benchmark:True
 null:null
 null:null
+===========================lite_params===========================
+inference:./ocr_db_crnn det
+infer_model:./models/ch_ppocr_mobile_v2.0_det_opt.nb|./models/ch_ppocr_mobile_v2.0_det_slim_opt.nb
+--cpu_threads:1|4
+--batch_size:1
+--power_mode:LITE_POWER_HIGH|LITE_POWER_LOW
+--image_dir:./test_data/icdar2015_lite/text_localization/ch4_test_images/|./test_data/icdar2015_lite/text_localization/ch4_test_images/img_233.jpg
+--config_dir:./config.txt
+--rec_dict_dir:./ppocr_keys_v1.txt
+--benchmark:True
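Note: each lite_params entry is key:value, and a value may hold several |-separated candidates that test_lite.sh sweeps over. A minimal sketch of how a value is extracted, assuming func_parser_value in PTDN/common_func.sh returns the text after the first colon (hypothetical stand-in, the real helper may differ):

    func_parser_value(){
        strs=$1
        IFS=":"
        array=(${strs})
        echo ${array[1]}   # e.g. "--cpu_threads:1|4" -> "1|4"
    }

The caller then switches IFS to '|' to loop over the candidates (1 and 4 in this example).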
PTDN/prepare.sh:

@@ -2,7 +2,7 @@
 FILENAME=$1
 # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer',
-#                 'cpp_infer', 'serving_infer', 'klquant_infer']
+#                 'cpp_infer', 'serving_infer', 'klquant_infer', 'lite_infer']
 MODE=$2

@@ -136,3 +136,37 @@ if [ ${MODE} = "serving_infer" ];then
     wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar
     cd ./inference && tar xf ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar && cd ../
 fi
+
+if [ ${MODE} = "lite_infer" ];then
+    # prepare lite nb model and test data
+    current_dir=${PWD}
+    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_opt.nb
+    wget -nc -P ./models https://paddleocr.bj.bcebos.com/dygraph_v2.0/lite/ch_ppocr_mobile_v2.0_det_slim_opt.nb
+    wget -nc -P ./test_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
+    cd ./test_data && tar -xf icdar2015_lite.tar && rm icdar2015_lite.tar && cd ../
+    # prepare lite env
+    export http_proxy=http://172.19.57.45:3128
+    export https_proxy=http://172.19.57.45:3128
+    paddlelite_url=https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.9/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz
+    paddlelite_zipfile=$(echo $paddlelite_url | awk -F "/" '{print $NF}')
+    paddlelite_file=inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv
+    wget ${paddlelite_url}
+    tar -xf ${paddlelite_zipfile}
+    mkdir -p ${paddlelite_file}/demo/cxx/ocr/test_lite
+    mv models test_data ${paddlelite_file}/demo/cxx/ocr/test_lite
+    cp ppocr/utils/ppocr_keys_v1.txt deploy/lite/config.txt ${paddlelite_file}/demo/cxx/ocr/test_lite
+    cp ./deploy/lite/* ${paddlelite_file}/demo/cxx/ocr/
+    cp ${paddlelite_file}/cxx/lib/libpaddle_light_api_shared.so ${paddlelite_file}/demo/cxx/ocr/test_lite
+    cp PTDN/configs/ppocr_det_mobile_params.txt PTDN/test_lite.sh PTDN/common_func.sh ${paddlelite_file}/demo/cxx/ocr/test_lite
+    cd ${paddlelite_file}/demo/cxx/ocr/
+    git clone https://github.com/LDOUBLEV/AutoLog.git
+    unset http_proxy
+    unset https_proxy
+    make -j
+    sleep 1
+    make -j
+    cp ocr_db_crnn test_lite && cp test_lite/libpaddle_light_api_shared.so test_lite/libc++_shared.so
+    tar -cf test_lite.tar ./test_lite && cp test_lite.tar ${current_dir} && cd ${current_dir}
+fi
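With this hook in place, the lite assets are staged the same way as the other modes. A hypothetical invocation, following the existing FILENAME/MODE convention of prepare.sh (paths match the files copied above):

    bash PTDN/prepare.sh PTDN/configs/ppocr_det_mobile_params.txt lite_infer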
PTDN/test_lite.sh (new file):

#!/bin/bash
source ./common_func.sh
export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH

FILENAME=$1
dataline=$(awk 'NR==101, NR==110{print}' $FILENAME)
echo $dataline

# parser params
IFS=$'\n'
lines=(${dataline})

# parser lite inference
lite_inference_cmd=$(func_parser_value "${lines[1]}")
lite_model_dir_list=$(func_parser_value "${lines[2]}")
lite_cpu_threads_list=$(func_parser_value "${lines[3]}")
lite_batch_size_list=$(func_parser_value "${lines[4]}")
lite_power_mode_list=$(func_parser_value "${lines[5]}")
lite_infer_img_dir_list=$(func_parser_value "${lines[6]}")
lite_config_dir=$(func_parser_value "${lines[7]}")
lite_rec_dict_dir=$(func_parser_value "${lines[8]}")
lite_benchmark_value=$(func_parser_value "${lines[9]}")

LOG_PATH="./output"
mkdir -p ${LOG_PATH}
status_log="${LOG_PATH}/results.log"
function func_lite(){
    IFS='|'
    _script=$1
    _lite_model=$2
    _log_path=$3
    _img_dir=$4
    _config=$5
    if [[ ${_lite_model} =~ "slim" ]]; then
        precision="INT8"
    else
        precision="FP32"
    fi
    is_single_img=$(echo ${_img_dir} | grep -E ".jpg|.jpeg|.png|.JPEG|.JPG")
    if [[ "$is_single_img" != "" ]]; then
        single_img="True"
    else
        single_img="False"
    fi

    # lite inference
    for num_threads in ${lite_cpu_threads_list[*]}; do
        for power_mode in ${lite_power_mode_list[*]}; do
            for batchsize in ${lite_batch_size_list[*]}; do
                model_name=$(echo ${_lite_model} | awk -F "/" '{print $NF}')
                _save_log_path="${_log_path}/lite_${model_name}_precision_${precision}_batchsize_${batchsize}_threads_${num_threads}_powermode_${power_mode}_singleimg_${single_img}.log"
                command="${_script} ${_lite_model} ${precision} ${num_threads} ${batchsize} ${power_mode} ${_img_dir} ${_config} ${lite_benchmark_value} > ${_save_log_path} 2>&1"
                eval ${command}
                status_check $? "${command}" "${status_log}"
            done
        done
    done
}

echo "################### run test ###################"
IFS="|"
for lite_model in ${lite_model_dir_list[*]}; do
    # run lite inference
    for img_dir in ${lite_infer_img_dir_list[*]}; do
        func_lite "${lite_inference_cmd}" "${lite_model}" "${LOG_PATH}" "${img_dir}" "${lite_config_dir}"
    done
done
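The test_lite.tar produced by prepare.sh is intended to be unpacked and run on the ARM device itself. A sketch of one possible workflow, assuming adb and a /data/local/tmp staging directory (both illustrative, not part of this PR):

    adb push test_lite.tar /data/local/tmp
    adb shell "cd /data/local/tmp && tar -xf test_lite.tar"
    adb shell "cd /data/local/tmp/test_lite && sh test_lite.sh ppocr_det_mobile_params.txt"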
deploy/lite/ocr_db_crnn.cc:

@@ -12,12 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "paddle_api.h" // NOLINT
 #include <chrono>
+#include "paddle_api.h"   // NOLINT
+#include "paddle_place.h"

 #include "cls_process.h"
 #include "crnn_process.h"
 #include "db_post_process.h"
+#include "AutoLog/auto_log/lite_autolog.h"

 using namespace paddle::lite_api; // NOLINT
 using namespace std;
@@ -27,7 +29,7 @@ void NeonMeanScale(const float *din, float *dout, int size,
                    const std::vector<float> mean,
                    const std::vector<float> scale) {
   if (mean.size() != 3 || scale.size() != 3) {
-    std::cerr << "[ERROR] mean or scale size must equal to 3\n";
+    std::cerr << "[ERROR] mean or scale size must equal to 3" << std::endl;
     exit(1);
   }
   float32x4_t vmean0 = vdupq_n_f32(mean[0]);
@@ -159,7 +161,8 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,
                  std::vector<float> &rec_text_score,
                  std::vector<std::string> charactor_dict,
                  std::shared_ptr<PaddlePredictor> predictor_cls,
-                 int use_direction_classify) {
+                 int use_direction_classify,
+                 std::vector<double> *times) {
   std::vector<float> mean = {0.5f, 0.5f, 0.5f};
   std::vector<float> scale = {1 / 0.5f, 1 / 0.5f, 1 / 0.5f};
@@ -226,14 +229,15 @@ void RunRecModel(std::vector<std::vector<std::vector<int>>> boxes, cv::Mat img,

 std::vector<std::vector<std::vector<int>>>
 RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
-            std::map<std::string, double> Config) {
+            std::map<std::string, double> Config, std::vector<double> *times) {
   // Read img
   int max_side_len = int(Config["max_side_len"]);
   int det_db_use_dilate = int(Config["det_db_use_dilate"]);

   cv::Mat srcimg;
   img.copyTo(srcimg);

+  auto preprocess_start = std::chrono::steady_clock::now();
   std::vector<float> ratio_hw;
   img = DetResizeImg(img, max_side_len, ratio_hw);
   cv::Mat img_fp;
@@ -248,8 +252,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
   std::vector<float> scale = {1 / 0.229f, 1 / 0.224f, 1 / 0.225f};
   const float *dimg = reinterpret_cast<const float *>(img_fp.data);
   NeonMeanScale(dimg, data0, img_fp.rows * img_fp.cols, mean, scale);
+  auto preprocess_end = std::chrono::steady_clock::now();

   // Run predictor
+  auto inference_start = std::chrono::steady_clock::now();
   predictor->Run();

   // Get output and post process
@@ -257,8 +263,10 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
       std::move(predictor->GetOutput(0)));
   auto *outptr = output_tensor->data<float>();
   auto shape_out = output_tensor->shape();
+  auto inference_end = std::chrono::steady_clock::now();

   // Save output
+  auto postprocess_start = std::chrono::steady_clock::now();
   float pred[shape_out[2] * shape_out[3]];
   unsigned char cbuf[shape_out[2] * shape_out[3]];
@@ -287,14 +295,35 @@ RunDetModel(std::shared_ptr<PaddlePredictor> predictor, cv::Mat img,
   std::vector<std::vector<std::vector<int>>> filter_boxes =
       FilterTagDetRes(boxes, ratio_hw[0], ratio_hw[1], srcimg);
+  auto postprocess_end = std::chrono::steady_clock::now();

+  std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
+  times->push_back(double(preprocess_diff.count() * 1000));
+  std::chrono::duration<float> inference_diff = inference_end - inference_start;
+  times->push_back(double(inference_diff.count() * 1000));
+  std::chrono::duration<float> postprocess_diff = postprocess_end - postprocess_start;
+  times->push_back(double(postprocess_diff.count() * 1000));

   return filter_boxes;
 }
-std::shared_ptr<PaddlePredictor> loadModel(std::string model_file) {
+std::shared_ptr<PaddlePredictor> loadModel(std::string model_file, std::string power_mode, int num_threads) {
   MobileConfig config;
   config.set_model_from_file(model_file);

+  if (power_mode == "LITE_POWER_HIGH") {
+    config.set_power_mode(LITE_POWER_HIGH);
+  } else {
+    if (power_mode == "LITE_POWER_LOW") {
+      config.set_power_mode(LITE_POWER_LOW);
+    } else {
+      std::cerr << "Only support LITE_POWER_HIGH or LITE_POWER_LOW." << std::endl;
+      exit(1);
+    }
+  }
+  config.set_threads(num_threads);

   std::shared_ptr<PaddlePredictor> predictor =
       CreatePaddlePredictor<MobileConfig>(config);
   return predictor;
@@ -354,60 +383,255 @@ std::map<std::string, double> LoadConfigTxt(std::string config_path) {
   return dict;
 }

-int main(int argc, char **argv) {
-  if (argc < 5) {
-    std::cerr << "[ERROR] usage: " << argv[0]
-              << " det_model_file cls_model_file rec_model_file image_path "
-                 "charactor_dict\n";
+void check_params(int argc, char **argv) {
+  if (argc <= 1 || (strcmp(argv[1], "det") != 0 && strcmp(argv[1], "rec") != 0 && strcmp(argv[1], "system") != 0)) {
+    std::cerr << "Please choose one mode of [det, rec, system]!" << std::endl;
     exit(1);
   }
-  std::string det_model_file = argv[1];
-  std::string rec_model_file = argv[2];
-  std::string cls_model_file = argv[3];
-  std::string img_path = argv[4];
-  std::string dict_path = argv[5];
+  if (strcmp(argv[1], "det") == 0) {
+    if (argc < 10) {
+      std::cerr << "[ERROR] usage: " << argv[0]
+                << " det det_model precision num_threads batchsize power_mode img_dir det_config lite_benchmark_value" << std::endl;
+      exit(1);
+    }
+  }
+  if (strcmp(argv[1], "rec") == 0) {
+    if (argc < 10) {
+      std::cerr << "[ERROR] usage: " << argv[0]
+                << " rec rec_model precision num_threads batchsize power_mode img_dir key_txt lite_benchmark_value" << std::endl;
+      exit(1);
+    }
+  }
+  if (strcmp(argv[1], "system") == 0) {
+    if (argc < 12) {
+      std::cerr << "[ERROR] usage: " << argv[0]
+                << " system det_model rec_model cls_model precision num_threads batchsize power_mode img_dir det_config key_txt lite_benchmark_value" << std::endl;
+      exit(1);
+    }
+  }
+}
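For reference, this is the det-mode command line that test_lite.sh assembles from the lite_params block above (a representative example; the image path is one of the two configured candidates):

    ./ocr_db_crnn det ./models/ch_ppocr_mobile_v2.0_det_opt.nb FP32 1 1 LITE_POWER_HIGH ./test_data/icdar2015_lite/text_localization/ch4_test_images/ ./config.txt True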
+void system(char **argv) {
+  std::string det_model_file = argv[2];
+  std::string rec_model_file = argv[3];
+  std::string cls_model_file = argv[4];
+  std::string precision = argv[5];
+  std::string num_threads = argv[6];
+  std::string batchsize = argv[7];
+  std::string power_mode = argv[8];
+  std::string img_dir = argv[9];
+  std::string det_config_path = argv[10];
+  std::string dict_path = argv[11];
+
+  if (strcmp(argv[5], "FP32") != 0 && strcmp(argv[5], "INT8") != 0) {
+    std::cerr << "Only support FP32 or INT8." << std::endl;
+    exit(1);
+  }
+
+  std::vector<cv::String> cv_all_img_names;
+  cv::glob(img_dir, cv_all_img_names);
   //// load config from txt file
-  auto Config = LoadConfigTxt("./config.txt");
+  auto Config = LoadConfigTxt(det_config_path);
   int use_direction_classify = int(Config["use_direction_classify"]);

-  auto start = std::chrono::system_clock::now();
+  auto charactor_dict = ReadDict(dict_path);
+  charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
+  charactor_dict.push_back(" ");

-  auto det_predictor = loadModel(det_model_file);
-  auto rec_predictor = loadModel(rec_model_file);
-  auto cls_predictor = loadModel(cls_model_file);
+  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
+  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
+  auto cls_predictor = loadModel(cls_model_file, power_mode, std::stoi(num_threads));

+  for (int i = 0; i < cv_all_img_names.size(); ++i) {
+    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
+    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+    if (!srcimg.data) {
+      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
+      exit(1);
+    }
+
+    std::vector<double> det_times;
+    auto boxes = RunDetModel(det_predictor, srcimg, Config, &det_times);
+
+    std::vector<std::string> rec_text;
+    std::vector<float> rec_text_score;
+    std::vector<double> rec_times;
+    RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
+                charactor_dict, cls_predictor, use_direction_classify, &rec_times);
+
+    //// visualization
+    auto img_vis = Visualization(srcimg, boxes);
+
+    //// print recognized text
+    for (int i = 0; i < rec_text.size(); i++) {
+      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
+                << std::endl;
+    }
+  }
+}
+void det(int argc, char **argv) {
+  std::string det_model_file = argv[2];
+  std::string precision = argv[3];
+  std::string num_threads = argv[4];
+  std::string batchsize = argv[5];
+  std::string power_mode = argv[6];
+  std::string img_dir = argv[7];
+  std::string det_config_path = argv[8];
+
+  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+    std::cerr << "Only support FP32 or INT8." << std::endl;
+    exit(1);
+  }
+
+  std::vector<cv::String> cv_all_img_names;
+  cv::glob(img_dir, cv_all_img_names);
+
+  //// load config from txt file
+  auto Config = LoadConfigTxt(det_config_path);
+
+  auto det_predictor = loadModel(det_model_file, power_mode, std::stoi(num_threads));
+
+  std::vector<double> time_info = {0, 0, 0};
+  for (int i = 0; i < cv_all_img_names.size(); ++i) {
+    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
+    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+    if (!srcimg.data) {
+      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
+      exit(1);
+    }
+
+    std::vector<double> times;
+    auto boxes = RunDetModel(det_predictor, srcimg, Config, &times);
+
+    //// visualization
+    auto img_vis = Visualization(srcimg, boxes);
+    std::cout << boxes.size() << " bboxes have been detected:" << std::endl;
+
+    // for (int i = 0; i < boxes.size(); i++) {
+    //   std::cout << "The " << i << " box:" << std::endl;
+    //   for (int j = 0; j < 4; j++) {
+    //     for (int k = 0; k < 2; k++) {
+    //       std::cout << boxes[i][j][k] << "\t";
+    //     }
+    //   }
+    //   std::cout << std::endl;
+    // }
+
+    time_info[0] += times[0];
+    time_info[1] += times[1];
+    time_info[2] += times[2];
+  }
+
+  if (strcmp(argv[9], "True") == 0) {
+    AutoLogger autolog(det_model_file,
+                       0,
+                       0,
+                       0,
+                       std::stoi(num_threads),
+                       std::stoi(batchsize),
+                       "dynamic",
+                       precision,
+                       power_mode,
+                       time_info,
+                       cv_all_img_names.size());
+    autolog.report();
+  }
+}
+void rec(int argc, char **argv) {
+  std::string rec_model_file = argv[2];
+  std::string precision = argv[3];
+  std::string num_threads = argv[4];
+  std::string batchsize = argv[5];
+  std::string power_mode = argv[6];
+  std::string img_dir = argv[7];
+  std::string dict_path = argv[8];
+
+  if (strcmp(argv[3], "FP32") != 0 && strcmp(argv[3], "INT8") != 0) {
+    std::cerr << "Only support FP32 or INT8." << std::endl;
+    exit(1);
+  }
+
+  std::vector<cv::String> cv_all_img_names;
+  cv::glob(img_dir, cv_all_img_names);
+
   auto charactor_dict = ReadDict(dict_path);
   charactor_dict.insert(charactor_dict.begin(), "#"); // blank char for ctc
   charactor_dict.push_back(" ");

-  cv::Mat srcimg = cv::imread(img_path, cv::IMREAD_COLOR);
-  auto boxes = RunDetModel(det_predictor, srcimg, Config);
-
-  std::vector<std::string> rec_text;
-  std::vector<float> rec_text_score;
-
-  RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
-              charactor_dict, cls_predictor, use_direction_classify);
-
-  auto end = std::chrono::system_clock::now();
-  auto duration =
-      std::chrono::duration_cast<std::chrono::microseconds>(end - start);
-
-  //// visualization
-  auto img_vis = Visualization(srcimg, boxes);
+  auto rec_predictor = loadModel(rec_model_file, power_mode, std::stoi(num_threads));
+
+  std::shared_ptr<PaddlePredictor> cls_predictor;
+
+  std::vector<double> time_info = {0, 0, 0};
+  for (int i = 0; i < cv_all_img_names.size(); ++i) {
+    std::cout << "The predict img: " << cv_all_img_names[i] << std::endl;
+    cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+    if (!srcimg.data) {
+      std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << std::endl;
+      exit(1);
+    }
+
+    int width = srcimg.cols;
+    int height = srcimg.rows;
+    std::vector<int> upper_left = {0, 0};
+    std::vector<int> upper_right = {width, 0};
+    std::vector<int> lower_right = {width, height};
+    std::vector<int> lower_left = {0, height};
+    std::vector<std::vector<int>> box = {upper_left, upper_right, lower_right, lower_left};
+    std::vector<std::vector<std::vector<int>>> boxes = {box};
+
+    std::vector<std::string> rec_text;
+    std::vector<float> rec_text_score;
+    std::vector<double> times;
+    RunRecModel(boxes, srcimg, rec_predictor, rec_text, rec_text_score,
+                charactor_dict, cls_predictor, 0, &times);
+
+    //// print recognized text
+    for (int i = 0; i < rec_text.size(); i++) {
+      std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
+                << std::endl;
+    }
+  }
+  // TODO: support autolog
+  if (strcmp(argv[9], "True") == 0) {
+    AutoLogger autolog(rec_model_file,
+                       0,
+                       0,
+                       0,
+                       std::stoi(num_threads),
+                       std::stoi(batchsize),
+                       "dynamic",
+                       precision,
+                       power_mode,
+                       time_info,
+                       cv_all_img_names.size());
+    autolog.report();
+  }
+}
+
+int main(int argc, char **argv) {
+  check_params(argc, argv);
+  std::cout << "mode: " << argv[1] << endl;

-  //// print recognized text
-  for (int i = 0; i < rec_text.size(); i++) {
-    std::cout << i << "\t" << rec_text[i] << "\t" << rec_text_score[i]
-              << std::endl;
-  }
+  if (strcmp(argv[1], "system") == 0) {
+    system(argv);
+  }

-  std::cout << "花费了"
-            << double(duration.count()) *
-                   std::chrono::microseconds::period::num /
-                   std::chrono::microseconds::period::den
-            << "秒" << std::endl;
+  if (strcmp(argv[1], "det") == 0) {
+    det(argc, argv);
+  }
+
+  if (strcmp(argv[1], "rec") == 0) {
+    rec(argc, argv);
+  }

   return 0;
 }
\ No newline at end of file