diff --git a/deploy/cpp_infer/readme.md b/deploy/cpp_infer/readme.md
index d901366235db21727ceac88528d83ae1120fd030..725197ad5cf9c7bf54be445f2bb3698096e7f9fb 100644
--- a/deploy/cpp_infer/readme.md
+++ b/deploy/cpp_infer/readme.md
@@ -4,16 +4,20 @@
C++ outperforms Python in compute performance, so C++ deployment is the usual choice in most CPU and GPU deployment scenarios. This section describes how to set up the C++ environment under Linux/Windows (CPU/GPU) and complete the deployment of
PaddleOCR models.
-* [1. Prepare the environment](#1)
-  + [1.0 Prerequisites](#10)
-  + [1.1 Compile the opencv library](#11)
-  + [1.2 Download or compile the Paddle inference library](#12)
-    - [1.2.1 Download and install directly](#121)
-    - [1.2.2 Compile the inference library from source](#122)
-* [2 Get started](#2)
-  + [2.1 Export the model as an inference model](#21)
-  + [2.2 Compile the PaddleOCR C++ inference demo](#22)
-  + [2.3 Run the demo](#23)
+- [Server-side C++ inference](#服务器端c预测)
+  - [1. Prepare the environment](#1-准备环境)
+    - [1.0 Prerequisites](#10-运行准备)
+    - [1.1 Compile the opencv library](#11-编译opencv库)
+    - [1.2 Download or compile the Paddle inference library](#12-下载或者编译paddle预测库)
+      - [1.2.1 Download and install directly](#121-直接下载安装)
+      - [1.2.2 Compile the inference library from source](#122-预测库源码编译)
+  - [2 Get started](#2-开始运行)
+    - [2.1 Export the model as an inference model](#21-将模型导出为inference-model)
+    - [2.2 Compile the PaddleOCR C++ inference demo](#22-编译paddleocr-c预测demo)
+    - [2.3 Run the demo](#23-运行demo)
+      - [1. Detection only:](#1-只调用检测)
+      - [2. Recognition only:](#2-只调用识别)
+      - [3. Detection + recognition pipeline:](#3-调用串联)
@@ -103,7 +107,7 @@ opencv3/
#### 1.2.1 Download and install directly
-* The [official Paddle inference library site](https://paddle-inference.readthedocs.io/en/latest/user_guides/download_lib.html) provides Linux inference libraries built for different CUDA versions; check the site and choose a suitable version (*a library built with paddle >= 2.0.1 is recommended*).
+* The [official Paddle inference library site](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#linux) provides Linux inference libraries built for different CUDA versions; check the site and choose a suitable version (*a library built with paddle >= 2.0.1 is recommended*).
* After downloading, extract it as shown below.
@@ -249,7 +253,7 @@ CUDNN_LIB_DIR=/your_cudnn_lib_dir
|gpu_id|int|0|GPU id, valid when use_gpu is true|
|gpu_mem|int|4000|GPU memory requested (MB)|
|cpu_math_library_num_threads|int|10|Number of threads for CPU inference; when the machine has enough cores, the larger the value, the faster the inference|
-|use_mkldnn|bool|true|Whether to use the mkldnn library|
+|enable_mkldnn|bool|true|Whether to use the mkldnn library|
- Detection model related parameters
diff --git a/deploy/cpp_infer/readme_en.md b/deploy/cpp_infer/readme_en.md
index 8c5a323af40e64f77e76cba23fd5c4408c643de5..6b1fa71cbe12e86fb3d0c9ac2ea981cb7f930c50 100644
--- a/deploy/cpp_infer/readme_en.md
+++ b/deploy/cpp_infer/readme_en.md
@@ -231,7 +231,7 @@ More parameters are as follows,
|gpu_id|int|0|GPU id when use_gpu is true|
|gpu_mem|int|4000|GPU memory requested (MB)|
|cpu_math_library_num_threads|int|10|Number of threads for CPU inference; when the machine has enough cores, the larger the value, the faster the inference speed|
-|use_mkldnn|bool|true|Whether to use mkdlnn library|
+|enable_mkldnn|bool|true|Whether to use the mkldnn library|
- Detection related parameters
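
Not part of the patch, but context for the table above: the rename simply brings the docs in line with the `enable_mkldnn` gflag that `main.cpp` already reads as `FLAGS_enable_mkldnn`. For readers who want to see how such table entries typically take effect, here is a minimal sketch of forwarding them to Paddle Inference's `paddle_infer::Config`; the flag set and the `MakeConfig` helper are illustrative, not code from this repository.

```cpp
#include <string>

#include <gflags/gflags.h>
#include "paddle_inference_api.h"  // provides paddle_infer::Config

DEFINE_bool(use_gpu, false, "Run inference on GPU instead of CPU.");
DEFINE_int32(gpu_id, 0, "GPU device id, used when use_gpu is true.");
DEFINE_int32(gpu_mem, 4000, "Initial GPU memory pool size, in MB.");
DEFINE_int32(cpu_threads, 10, "CPU math library threads for CPU inference.");
DEFINE_bool(enable_mkldnn, true, "Enable the MKL-DNN (oneDNN) CPU backend.");

// Sketch: translate the command-line flags above into a predictor config.
paddle_infer::Config MakeConfig(const std::string &model_dir) {
  paddle_infer::Config config;
  config.SetModel(model_dir + "/inference.pdmodel",
                  model_dir + "/inference.pdiparams");
  if (FLAGS_use_gpu) {
    // gpu_mem is the initial memory pool size in MB, gpu_id the device id.
    config.EnableUseGpu(FLAGS_gpu_mem, FLAGS_gpu_id);
  } else {
    config.DisableGpu();
    if (FLAGS_enable_mkldnn) {
      config.EnableMKLDNN();  // CPU-only acceleration path
    }
    config.SetCpuMathLibraryNumThreads(FLAGS_cpu_threads);
  }
  return config;
}
```

MKL-DNN acceleration only applies on the CPU path, which is why the sketch guards `EnableMKLDNN()` behind the `use_gpu` check.
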
diff --git a/deploy/cpp_infer/src/main.cpp b/deploy/cpp_infer/src/main.cpp
index b7a199b548beca881e4ab69491adcc9351f52c0f..e1069a11e3bd7fa7ce18259d5fcc32594c0ecddc 100644
--- a/deploy/cpp_infer/src/main.cpp
+++ b/deploy/cpp_infer/src/main.cpp
@@ -28,14 +28,14 @@
#include
#include
-#include
#include
+#include
#include
#include
#include
-#include
#include "auto_log/autolog.h"
+#include
DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU.");
DEFINE_int32(gpu_id, 0, "Device id of GPU to execute.");
@@ -51,8 +51,8 @@ DEFINE_string(image_dir, "", "Dir of input image.");
DEFINE_string(det_model_dir, "", "Path of det inference model.");
DEFINE_int32(max_side_len, 960, "max_side_len of input image.");
DEFINE_double(det_db_thresh, 0.3, "Threshold of det_db_thresh.");
-DEFINE_double(det_db_box_thresh, 0.5, "Threshold of det_db_box_thresh.");
-DEFINE_double(det_db_unclip_ratio, 1.6, "Threshold of det_db_unclip_ratio.");
+DEFINE_double(det_db_box_thresh, 0.6, "Threshold of det_db_box_thresh.");
+DEFINE_double(det_db_unclip_ratio, 1.5, "Threshold of det_db_unclip_ratio.");
DEFINE_bool(use_polygon_score, false, "Whether use polygon score.");
DEFINE_bool(visualize, true, "Whether show the detection results.");
// classification related
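
Not part of the patch, but context for the two detection defaults changed just above: `det_db_box_thresh` discards low-score DB boxes, while `det_db_unclip_ratio` controls how far each shrunk text region is expanded back out during postprocessing. A minimal, self-contained sketch of the usual DB-style unclip distance, assuming the common `area * ratio / perimeter` formulation (the `Pt` struct and `UnclipDistance` name are illustrative, not from this file):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

struct Pt { double x, y; };  // illustrative point type

// Sketch of the DB-style "unclip" offset: the shrunk text polygon is dilated
// by distance = area * unclip_ratio / perimeter before boxes are emitted.
double UnclipDistance(const std::vector<Pt> &poly, double unclip_ratio) {
  double twice_area = 0.0, perimeter = 0.0;
  for (std::size_t i = 0; i < poly.size(); ++i) {
    const Pt &a = poly[i];
    const Pt &b = poly[(i + 1) % poly.size()];
    twice_area += a.x * b.y - b.x * a.y;            // shoelace formula
    perimeter += std::hypot(b.x - a.x, b.y - a.y);
  }
  double area = std::fabs(twice_area) / 2.0;
  return perimeter > 0.0 ? area * unclip_ratio / perimeter : 0.0;
}
```

A larger ratio produces looser boxes, so lowering the default from 1.6 to 1.5 tightens the returned boxes slightly.
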
@@ -62,281 +62,267 @@ DEFINE_double(cls_thresh, 0.9, "Threshold of cls_thresh.");
// recognition related
DEFINE_string(rec_model_dir, "", "Path of rec inference model.");
DEFINE_int32(rec_batch_num, 6, "rec_batch_num.");
-DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt", "Path of dictionary.");
-
+DEFINE_string(char_list_file, "../../ppocr/utils/ppocr_keys_v1.txt",
+ "Path of dictionary.");
using namespace std;
using namespace cv;
using namespace PaddleOCR;
-
-static bool PathExists(const std::string& path){
+static bool PathExists(const std::string &path) {
#ifdef _WIN32
struct _stat buffer;
return (_stat(path.c_str(), &buffer) == 0);
#else
struct stat buffer;
return (stat(path.c_str(), &buffer) == 0);
-#endif // !_WIN32
+#endif // !_WIN32
}
-
int main_det(std::vector<cv::String> cv_all_img_names) {
- std::vector<double> time_info = {0, 0, 0};
- DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
- FLAGS_gpu_mem, FLAGS_cpu_threads,
- FLAGS_enable_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
- FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
- FLAGS_use_polygon_score, FLAGS_visualize,
- FLAGS_use_tensorrt, FLAGS_precision);
-
- for (int i = 0; i < cv_all_img_names.size(); ++i) {
-// LOG(INFO) << "The predict img: " << cv_all_img_names[i];
-
- cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
- if (!srcimg.data) {
- std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
- exit(1);
+ std::vector<double> time_info = {0, 0, 0};
+ DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
+ FLAGS_gpu_mem, FLAGS_cpu_threads, FLAGS_enable_mkldnn,
+ FLAGS_max_side_len, FLAGS_det_db_thresh,
+ FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
+ FLAGS_use_polygon_score, FLAGS_visualize, FLAGS_use_tensorrt,
+ FLAGS_precision);
+
+ for (int i = 0; i < cv_all_img_names.size(); ++i) {
+ // LOG(INFO) << "The predict img: " << cv_all_img_names[i];
+
+ cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+ if (!srcimg.data) {
+ std::cerr << "[ERROR] image read failed! image path: "
+ << cv_all_img_names[i] << endl;
+ exit(1);
+ }
+ std::vector<std::vector<std::vector<int>>> boxes;
+ std::vector<double> det_times;
+
+ det.Run(srcimg, boxes, &det_times);
+
+ time_info[0] += det_times[0];
+ time_info[1] += det_times[1];
+ time_info[2] += det_times[2];
+
+ cout << cv_all_img_names[i] << '\t';
+ for (int n = 0; n < boxes.size(); n++) {
+ for (int m = 0; m < boxes[n].size(); m++) {
+ cout << boxes[n][m][0] << ' ' << boxes[n][m][1] << ' ';
}
- std::vector<std::vector<std::vector<int>>> boxes;
- std::vector<double> det_times;
-
- det.Run(srcimg, boxes, &det_times);
-
- time_info[0] += det_times[0];
- time_info[1] += det_times[1];
- time_info[2] += det_times[2];
-
- if (FLAGS_benchmark) {
- cout << cv_all_img_names[i] << '\t';
- for (int n = 0; n < boxes.size(); n++) {
- for (int m = 0; m < boxes[n].size(); m++) {
- cout << boxes[n][m][0] << ' ' << boxes[n][m][1] << ' ';
- }
- }
- cout << endl;
- }
}
-
+ cout << endl;
if (FLAGS_benchmark) {
- AutoLogger autolog("ocr_det",
- FLAGS_use_gpu,
- FLAGS_use_tensorrt,
- FLAGS_enable_mkldnn,
- FLAGS_cpu_threads,
- 1,
- "dynamic",
- FLAGS_precision,
- time_info,
- cv_all_img_names.size());
- autolog.report();
+ cout << cv_all_img_names[i] << '\t';
+ for (int n = 0; n < boxes.size(); n++) {
+ for (int m = 0; m < boxes[n].size(); m++) {
+ cout << boxes[n][m][0] << ' ' << boxes[n][m][1] << ' ';
+ }
+ }
+ cout << endl;
}
- return 0;
-}
+ }
+ if (FLAGS_benchmark) {
+ AutoLogger autolog("ocr_det", FLAGS_use_gpu, FLAGS_use_tensorrt,
+ FLAGS_enable_mkldnn, FLAGS_cpu_threads, 1, "dynamic",
+ FLAGS_precision, time_info, cv_all_img_names.size());
+ autolog.report();
+ }
+ return 0;
+}
int main_rec(std::vector<cv::String> cv_all_img_names) {
- std::vector<double> time_info = {0, 0, 0};
-
- std::string char_list_file = FLAGS_char_list_file;
- if (FLAGS_benchmark)
- char_list_file = FLAGS_char_list_file.substr(6);
- cout << "label file: " << char_list_file << endl;
-
- CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
- FLAGS_gpu_mem, FLAGS_cpu_threads,
- FLAGS_enable_mkldnn, char_list_file,
- FLAGS_use_tensorrt, FLAGS_precision, FLAGS_rec_batch_num);
+ std::vector<double> time_info = {0, 0, 0};
- std::vector<cv::Mat> img_list;
- for (int i = 0; i < cv_all_img_names.size(); ++i) {
- LOG(INFO) << "The predict img: " << cv_all_img_names[i];
+ std::string char_list_file = FLAGS_char_list_file;
+ if (FLAGS_benchmark)
+ char_list_file = FLAGS_char_list_file.substr(6);
+ cout << "label file: " << char_list_file << endl;
- cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
- if (!srcimg.data) {
- std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
- exit(1);
- }
- img_list.push_back(srcimg);
- }
- std::vector<double> rec_times;
- rec.Run(img_list, &rec_times);
- time_info[0] += rec_times[0];
- time_info[1] += rec_times[1];
- time_info[2] += rec_times[2];
-
- if (FLAGS_benchmark) {
- AutoLogger autolog("ocr_rec",
- FLAGS_use_gpu,
- FLAGS_use_tensorrt,
- FLAGS_enable_mkldnn,
- FLAGS_cpu_threads,
- FLAGS_rec_batch_num,
- "dynamic",
- FLAGS_precision,
- time_info,
- cv_all_img_names.size());
- autolog.report();
+ CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
+ FLAGS_gpu_mem, FLAGS_cpu_threads, FLAGS_enable_mkldnn,
+ char_list_file, FLAGS_use_tensorrt, FLAGS_precision,
+ FLAGS_rec_batch_num);
+
+ std::vector<cv::Mat> img_list;
+ for (int i = 0; i < cv_all_img_names.size(); ++i) {
+ LOG(INFO) << "The predict img: " << cv_all_img_names[i];
+
+ cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+ if (!srcimg.data) {
+ std::cerr << "[ERROR] image read failed! image path: "
+ << cv_all_img_names[i] << endl;
+ exit(1);
}
- return 0;
-}
+ img_list.push_back(srcimg);
+ }
+ std::vector<double> rec_times;
+ rec.Run(img_list, &rec_times);
+ time_info[0] += rec_times[0];
+ time_info[1] += rec_times[1];
+ time_info[2] += rec_times[2];
+ if (FLAGS_benchmark) {
+ AutoLogger autolog("ocr_rec", FLAGS_use_gpu, FLAGS_use_tensorrt,
+ FLAGS_enable_mkldnn, FLAGS_cpu_threads,
+ FLAGS_rec_batch_num, "dynamic", FLAGS_precision,
+ time_info, cv_all_img_names.size());
+ autolog.report();
+ }
+ return 0;
+}
int main_system(std::vector<cv::String> cv_all_img_names) {
- std::vector<double> time_info_det = {0, 0, 0};
- std::vector<double> time_info_rec = {0, 0, 0};
-
- DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
- FLAGS_gpu_mem, FLAGS_cpu_threads,
- FLAGS_enable_mkldnn, FLAGS_max_side_len, FLAGS_det_db_thresh,
- FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
- FLAGS_use_polygon_score, FLAGS_visualize,
- FLAGS_use_tensorrt, FLAGS_precision);
-
- Classifier *cls = nullptr;
- if (FLAGS_use_angle_cls) {
- cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
- FLAGS_gpu_mem, FLAGS_cpu_threads,
- FLAGS_enable_mkldnn, FLAGS_cls_thresh,
- FLAGS_use_tensorrt, FLAGS_precision);
- }
+ std::vector<double> time_info_det = {0, 0, 0};
+ std::vector<double> time_info_rec = {0, 0, 0};
- std::string char_list_file = FLAGS_char_list_file;
- if (FLAGS_benchmark)
- char_list_file = FLAGS_char_list_file.substr(6);
- cout << "label file: " << char_list_file << endl;
-
- CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
- FLAGS_gpu_mem, FLAGS_cpu_threads,
- FLAGS_enable_mkldnn, char_list_file,
- FLAGS_use_tensorrt, FLAGS_precision, FLAGS_rec_batch_num);
-
- for (int i = 0; i < cv_all_img_names.size(); ++i) {
- LOG(INFO) << "The predict img: " << cv_all_img_names[i];
-
- cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
- if (!srcimg.data) {
- std::cerr << "[ERROR] image read failed! image path: " << cv_all_img_names[i] << endl;
- exit(1);
- }
- std::vector<std::vector<std::vector<int>>> boxes;
- std::vector<double> det_times;
- std::vector<double> rec_times;
-
- det.Run(srcimg, boxes, &det_times);
- time_info_det[0] += det_times[0];
- time_info_det[1] += det_times[1];
- time_info_det[2] += det_times[2];
-
- std::vector<cv::Mat> img_list;
- for (int j = 0; j < boxes.size(); j++) {
- cv::Mat crop_img;
- crop_img = Utility::GetRotateCropImage(srcimg, boxes[j]);
- if (cls != nullptr) {
- crop_img = cls->Run(crop_img);
- }
- img_list.push_back(crop_img);
- }
+ DBDetector det(FLAGS_det_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
+ FLAGS_gpu_mem, FLAGS_cpu_threads, FLAGS_enable_mkldnn,
+ FLAGS_max_side_len, FLAGS_det_db_thresh,
+ FLAGS_det_db_box_thresh, FLAGS_det_db_unclip_ratio,
+ FLAGS_use_polygon_score, FLAGS_visualize, FLAGS_use_tensorrt,
+ FLAGS_precision);
+
+ Classifier *cls = nullptr;
+ if (FLAGS_use_angle_cls) {
+ cls = new Classifier(FLAGS_cls_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
+ FLAGS_gpu_mem, FLAGS_cpu_threads, FLAGS_enable_mkldnn,
+ FLAGS_cls_thresh, FLAGS_use_tensorrt, FLAGS_precision);
+ }
+
+ std::string char_list_file = FLAGS_char_list_file;
+ if (FLAGS_benchmark)
+ char_list_file = FLAGS_char_list_file.substr(6);
+ cout << "label file: " << char_list_file << endl;
+
+ CRNNRecognizer rec(FLAGS_rec_model_dir, FLAGS_use_gpu, FLAGS_gpu_id,
+ FLAGS_gpu_mem, FLAGS_cpu_threads, FLAGS_enable_mkldnn,
+ char_list_file, FLAGS_use_tensorrt, FLAGS_precision,
+ FLAGS_rec_batch_num);
+
+ for (int i = 0; i < cv_all_img_names.size(); ++i) {
+ LOG(INFO) << "The predict img: " << cv_all_img_names[i];
- rec.Run(img_list, &rec_times);
- time_info_rec[0] += rec_times[0];
- time_info_rec[1] += rec_times[1];
- time_info_rec[2] += rec_times[2];
+ cv::Mat srcimg = cv::imread(cv_all_img_names[i], cv::IMREAD_COLOR);
+ if (!srcimg.data) {
+ std::cerr << "[ERROR] image read failed! image path: "
+ << cv_all_img_names[i] << endl;
+ exit(1);
}
-
- if (FLAGS_benchmark) {
- AutoLogger autolog_det("ocr_det",
- FLAGS_use_gpu,
- FLAGS_use_tensorrt,
- FLAGS_enable_mkldnn,
- FLAGS_cpu_threads,
- 1,
- "dynamic",
- FLAGS_precision,
- time_info_det,
- cv_all_img_names.size());
- AutoLogger autolog_rec("ocr_rec",
- FLAGS_use_gpu,
- FLAGS_use_tensorrt,
- FLAGS_enable_mkldnn,
- FLAGS_cpu_threads,
- FLAGS_rec_batch_num,
- "dynamic",
- FLAGS_precision,
- time_info_rec,
- cv_all_img_names.size());
- autolog_det.report();
- std::cout << endl;
- autolog_rec.report();
- }
- return 0;
-}
+ std::vector<std::vector<std::vector<int>>> boxes;
+ std::vector<double> det_times;
+ std::vector<double> rec_times;
+ det.Run(srcimg, boxes, &det_times);
+ time_info_det[0] += det_times[0];
+ time_info_det[1] += det_times[1];
+ time_info_det[2] += det_times[2];
-void check_params(char* mode) {
- if (strcmp(mode, "det")==0) {
- if (FLAGS_det_model_dir.empty() || FLAGS_image_dir.empty()) {
- std::cout << "Usage[det]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
- << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
- exit(1);
- }
+ std::vector<cv::Mat> img_list;
+ for (int j = 0; j < boxes.size(); j++) {
+ cv::Mat crop_img;
+ crop_img = Utility::GetRotateCropImage(srcimg, boxes[j]);
+ if (cls != nullptr) {
+ crop_img = cls->Run(crop_img);
+ }
+ img_list.push_back(crop_img);
}
- if (strcmp(mode, "rec")==0) {
- if (FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) {
- std::cout << "Usage[rec]: ./ppocr --rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
- << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
- exit(1);
- }
+
+ rec.Run(img_list, &rec_times);
+ time_info_rec[0] += rec_times[0];
+ time_info_rec[1] += rec_times[1];
+ time_info_rec[2] += rec_times[2];
+ }
+
+ if (FLAGS_benchmark) {
+ AutoLogger autolog_det("ocr_det", FLAGS_use_gpu, FLAGS_use_tensorrt,
+ FLAGS_enable_mkldnn, FLAGS_cpu_threads, 1, "dynamic",
+ FLAGS_precision, time_info_det,
+ cv_all_img_names.size());
+ AutoLogger autolog_rec("ocr_rec", FLAGS_use_gpu, FLAGS_use_tensorrt,
+ FLAGS_enable_mkldnn, FLAGS_cpu_threads,
+ FLAGS_rec_batch_num, "dynamic", FLAGS_precision,
+ time_info_rec, cv_all_img_names.size());
+ autolog_det.report();
+ std::cout << endl;
+ autolog_rec.report();
+ }
+ return 0;
+}
+
+void check_params(char *mode) {
+ if (strcmp(mode, "det") == 0) {
+ if (FLAGS_det_model_dir.empty() || FLAGS_image_dir.empty()) {
+ std::cout << "Usage[det]: ./ppocr "
+ "--det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
+ << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
+ exit(1);
}
- if (strcmp(mode, "system")==0) {
- if ((FLAGS_det_model_dir.empty() || FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) ||
- (FLAGS_use_angle_cls && FLAGS_cls_model_dir.empty())) {
- std::cout << "Usage[system without angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
- << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
- << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
- std::cout << "Usage[system with angle cls]: ./ppocr --det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
- << "--use_angle_cls=true "
- << "--cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ "
- << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
- << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
- exit(1);
- }
+ }
+ if (strcmp(mode, "rec") == 0) {
+ if (FLAGS_rec_model_dir.empty() || FLAGS_image_dir.empty()) {
+ std::cout << "Usage[rec]: ./ppocr "
+ "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
+ << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
+ exit(1);
}
- if (FLAGS_precision != "fp32" && FLAGS_precision != "fp16" && FLAGS_precision != "int8") {
- cout << "precison should be 'fp32'(default), 'fp16' or 'int8'. " << endl;
- exit(1);
+ }
+ if (strcmp(mode, "system") == 0) {
+ if ((FLAGS_det_model_dir.empty() || FLAGS_rec_model_dir.empty() ||
+ FLAGS_image_dir.empty()) ||
+ (FLAGS_use_angle_cls && FLAGS_cls_model_dir.empty())) {
+ std::cout << "Usage[system without angle cls]: ./ppocr "
+ "--det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
+ << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
+ << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
+ std::cout << "Usage[system with angle cls]: ./ppocr "
+ "--det_model_dir=/PATH/TO/DET_INFERENCE_MODEL/ "
+ << "--use_angle_cls=true "
+ << "--cls_model_dir=/PATH/TO/CLS_INFERENCE_MODEL/ "
+ << "--rec_model_dir=/PATH/TO/REC_INFERENCE_MODEL/ "
+ << "--image_dir=/PATH/TO/INPUT/IMAGE/" << std::endl;
+ exit(1);
}
+ }
+ if (FLAGS_precision != "fp32" && FLAGS_precision != "fp16" &&
+ FLAGS_precision != "int8") {
+ cout << "precison should be 'fp32'(default), 'fp16' or 'int8'. " << endl;
+ exit(1);
+ }
}
-
int main(int argc, char **argv) {
- if (argc<=1 || (strcmp(argv[1], "det")!=0 && strcmp(argv[1], "rec")!=0 && strcmp(argv[1], "system")!=0)) {
- std::cout << "Please choose one mode of [det, rec, system] !" << std::endl;
- return -1;
- }
- std::cout << "mode: " << argv[1] << endl;
-
- // Parsing command-line
- google::ParseCommandLineFlags(&argc, &argv, true);
- check_params(argv[1]);
-
- if (!PathExists(FLAGS_image_dir)) {
- std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir << endl;
- exit(1);
- }
-
- std::vector<cv::String> cv_all_img_names;
- cv::glob(FLAGS_image_dir, cv_all_img_names);
- std::cout << "total images num: " << cv_all_img_names.size() << endl;
-
- if (strcmp(argv[1], "det")==0) {
- return main_det(cv_all_img_names);
- }
- if (strcmp(argv[1], "rec")==0) {
- return main_rec(cv_all_img_names);
- }
- if (strcmp(argv[1], "system")==0) {
- return main_system(cv_all_img_names);
- }
+ if (argc <= 1 ||
+ (strcmp(argv[1], "det") != 0 && strcmp(argv[1], "rec") != 0 &&
+ strcmp(argv[1], "system") != 0)) {
+ std::cout << "Please choose one mode of [det, rec, system] !" << std::endl;
+ return -1;
+ }
+ std::cout << "mode: " << argv[1] << endl;
+
+ // Parsing command-line
+ google::ParseCommandLineFlags(&argc, &argv, true);
+ check_params(argv[1]);
+
+ if (!PathExists(FLAGS_image_dir)) {
+ std::cerr << "[ERROR] image path not exist! image_dir: " << FLAGS_image_dir
+ << endl;
+ exit(1);
+ }
+
+ std::vector<cv::String> cv_all_img_names;
+ cv::glob(FLAGS_image_dir, cv_all_img_names);
+ std::cout << "total images num: " << cv_all_img_names.size() << endl;
+ if (strcmp(argv[1], "det") == 0) {
+ return main_det(cv_all_img_names);
+ }
+ if (strcmp(argv[1], "rec") == 0) {
+ return main_rec(cv_all_img_names);
+ }
+ if (strcmp(argv[1], "system") == 0) {
+ return main_system(cv_all_img_names);
+ }
}