diff --git a/MANIFEST.in b/MANIFEST.in index 1ca129b15787978e5e890498eb4d6609d8a4cba0..d674fabc5d714f7e31ed00b32be2d44d6dd10871 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -5,4 +5,5 @@ recursive-include ppocr/utils *.txt utility.py logging.py network.py recursive-include ppocr/data *.py recursive-include ppocr/postprocess *.py recursive-include tools/infer *.py -recursive-include ppocr/utils/e2e_utils *.py \ No newline at end of file +recursive-include ppocr/utils/e2e_utils *.py +recursive-include ppstructure *.py \ No newline at end of file diff --git a/__init__.py b/__init__.py index 7d94f66be072067172d56da13d8bb27d9aeac431..e22e466a8426c437407c491bbae47c3b66defa2e 100644 --- a/__init__.py +++ b/__init__.py @@ -11,7 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import paddleocr +from .paddleocr import * -__all__ = ['PaddleOCR', 'draw_ocr'] -from .paddleocr import PaddleOCR -from .tools.infer.utility import draw_ocr +__version__ = paddleocr.VERSION +__all__ = ['PaddleOCR', 'PPStructure', 'draw_ocr', 'draw_structure_result', 'save_structure_res','download_with_progressbar'] diff --git a/configs/rec/rec_icdar15_train.yml b/configs/rec/rec_icdar15_train.yml index 20f758c20da9547486d7492d6a09cceb2b937e4e..500d2333f217008b2abf352b0ccd29a43ec24fd5 100644 --- a/configs/rec/rec_icdar15_train.yml +++ b/configs/rec/rec_icdar15_train.yml @@ -10,7 +10,7 @@ Global: cal_metric_during_train: True pretrained_model: checkpoints: - save_inference_dir: + save_inference_dir: ./ use_visualdl: False infer_img: doc/imgs_words_en/word_10.png # for data or label process @@ -60,8 +60,8 @@ Metric: Train: dataset: name: SimpleDataSet - data_dir: ./train_data/ - label_file_list: ["./train_data/train_list.txt"] + data_dir: ./train_data/ic15_data/ + label_file_list: ["./train_data/ic15_data/rec_gt_train.txt"] transforms: - DecodeImage: # load image img_mode: BGR @@ -81,8 +81,8 @@ Train: Eval: dataset: name: SimpleDataSet - data_dir: ./train_data/ - label_file_list: ["./train_data/val_list.txt"] + data_dir: ./train_data/ic15_data + label_file_list: ["./train_data/ic15_data/rec_gt_test.txt"] transforms: - DecodeImage: # load image img_mode: BGR diff --git a/deploy/cpp_infer/CMakeLists.txt b/deploy/cpp_infer/CMakeLists.txt index 4f2dc88a8f1a9d185b7274a1a723c0e670bd1bf1..0cf20635f8849cbb405118fd0e2fa8538eb3fa06 100644 --- a/deploy/cpp_infer/CMakeLists.txt +++ b/deploy/cpp_infer/CMakeLists.txt @@ -37,10 +37,8 @@ endif() if (WIN32) - include_directories("${PADDLE_LIB}/paddle/fluid/inference") include_directories("${PADDLE_LIB}/paddle/include") link_directories("${PADDLE_LIB}/paddle/lib") - link_directories("${PADDLE_LIB}/paddle/fluid/inference") find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH) else () diff --git a/deploy/cpp_infer/docs/windows_vs2019_build.md b/deploy/cpp_infer/docs/windows_vs2019_build.md index be3dd3833f57d5592f7074e8149cbe9fdbe7ade1..e46f542a323dbe539b4a7f596e4587f7729a4420 100644 --- a/deploy/cpp_infer/docs/windows_vs2019_build.md +++ b/deploy/cpp_infer/docs/windows_vs2019_build.md @@ -14,7 +14,7 @@ PaddleOCR在Windows 平台下基于`Visual Studio 2019 Community` 进行了测 ### Step1: 下载PaddlePaddle C++ 预测库 fluid_inference -PaddlePaddle C++ 预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/05_inference_deployment/inference/windows_cpp_inference.html) +PaddlePaddle C++ 
预测库针对不同的`CPU`和`CUDA`版本提供了不同的预编译版本,请根据实际情况下载: [C++预测库下载列表](https://paddleinference.paddlepaddle.org.cn/user_guides/download_lib.html#windows)
 
 解压后`D:\projects\fluid_inference`目录包含内容为:
 ```
diff --git a/deploy/cpp_infer/src/config.cpp b/deploy/cpp_infer/src/config.cpp
index a0ac1d08c93d1ff1e51f769465b2df2b4355fbc0..050b75ede9475432f250cafac2cd5fbed17fea0a 100644
--- a/deploy/cpp_infer/src/config.cpp
+++ b/deploy/cpp_infer/src/config.cpp
@@ -21,12 +21,15 @@ std::vector<std::string> OCRConfig::split(const std::string &str,
   std::vector<std::string> res;
   if ("" == str) return res;
-  char strs[str.length() + 1];
+
+  int strlen = str.length() + 1;
+  char *strs = new char[strlen];
   std::strcpy(strs, str.c_str());
 
-  char d[delim.length() + 1];
+  int delimlen = delim.length() + 1;
+  char *d = new char[delimlen];
   std::strcpy(d, delim.c_str());
 
   char *p = std::strtok(strs, d);
   while (p) {
     std::string s = p;
diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md
index 957c6926b15fad3091265da9295f5ad820fe6a26..167ed7b2b8a13706dfe1533265b6d96560265511 100644
--- a/doc/doc_ch/whl.md
+++ b/doc/doc_ch/whl.md
@@ -5,26 +5,32 @@
 ### 1.1 安装whl包
 
 pip安装
+
 ```bash
 pip install "paddleocr>=2.0.1" # 推荐使用2.0.1+版本
 ```
 
 本地构建并安装
+
 ```bash
 python3 setup.py bdist_wheel
 pip3 install dist/paddleocr-x.x.x-py3-none-any.whl # x.x.x是paddleocr的版本号
 ```
 
 ## 2 使用
+
 ### 2.1 代码使用
+
 paddleocr whl包会自动下载ppocr轻量级模型作为默认模型,可以根据第3节**自定义模型**进行自定义更换。
 
 * 检测+方向分类器+识别全流程
+
 ```python
 from paddleocr import PaddleOCR, draw_ocr
+
 # Paddleocr目前支持中英文、英文、法语、德语、韩语、日语,可以通过修改lang参数进行切换
 # 参数依次为`ch`, `en`, `french`, `german`, `korean`, `japan`。
-ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory
+ocr = PaddleOCR(use_angle_cls=True, lang="ch")  # need to run only once to download and load model into memory
 img_path = 'PaddleOCR/doc/imgs/11.jpg'
 result = ocr.ocr(img_path, cls=True)
 for line in result:
@@ -32,6 +38,7 @@ for line in result:
 
 # 显示结果
 from PIL import Image
+
 image = Image.open(img_path).convert('RGB')
 boxes = [line[0] for line in result]
 txts = [line[1][0] for line in result]
@@ -40,31 +47,36 @@ im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc
 im_show = Image.fromarray(im_show)
 im_show.save('result.jpg')
 ```
+
 结果是一个list,每个item包含了文本框,文字和识别置信度
+
 ```bash
 [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]]
 [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]]
 [[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]], ['(45元/每公斤,100公斤起订)', 0.9676722]]
 ......
 ```
+
 结果可视化
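As a small complement to the full-pipeline example above, a minimal sketch of post-processing the returned `result` — assuming only the `[box, (text, score)]` item layout shown in the sample output; the `result.json` file name is illustrative:

```python
import json

from paddleocr import PaddleOCR

ocr = PaddleOCR(use_angle_cls=True, lang="ch")
result = ocr.ocr('PaddleOCR/doc/imgs/11.jpg', cls=True)

# each item is [box, (text, score)], as in the sample output above
records = [
    {
        'box': [[float(x), float(y)] for x, y in line[0]],
        'text': line[1][0],
        'score': float(line[1][1]),
    }
    for line in result
]
with open('result.json', 'w', encoding='utf-8') as f:
    json.dump(records, f, ensure_ascii=False, indent=2)
```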
- * 检测+识别 + ```python from paddleocr import PaddleOCR, draw_ocr -ocr = PaddleOCR() # need to run only once to download and load model into memory + +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' -result = ocr.ocr(img_path,cls=False) +result = ocr.ocr(img_path, cls=False) for line in result: print(line) # 显示结果 from PIL import Image + image = Image.open(img_path).convert('RGB') boxes = [line[0] for line in result] txts = [line[1][0] for line in result] @@ -73,38 +85,46 @@ im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` + 结果是一个list,每个item包含了文本框,文字和识别置信度 + ```bash [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]] [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]] [[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]], ['(45元/每公斤,100公斤起订)', 0.9676722]] ...... ``` + 结果可视化
- * 方向分类器+识别 + ```python from paddleocr import PaddleOCR -ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory + +ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_words/ch/word_1.jpg' result = ocr.ocr(img_path, det=False, cls=True) for line in result: print(line) ``` + 结果是一个list,每个item只包含识别结果和识别置信度 + ```bash ['韩国小馆', 0.9907421] ``` * 单独执行检测 + ```python from paddleocr import PaddleOCR, draw_ocr -ocr = PaddleOCR() # need to run only once to download and load model into memory + +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' result = ocr.ocr(img_path, rec=False) for line in result: @@ -118,13 +138,16 @@ im_show = draw_ocr(image, result, txts=None, scores=None, font_path='/path/to/Pa im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` + 结果是一个list,每个item只包含文本框 + ```bash [[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]] [[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]] [[128.0, 397.0], [273.0, 397.0], [273.0, 414.0], [128.0, 414.0]] ...... ``` + 结果可视化 @@ -133,29 +156,37 @@ im_show.save('result.jpg') * 单独执行识别 + ```python from paddleocr import PaddleOCR -ocr = PaddleOCR() # need to run only once to download and load model into memory + +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_words/ch/word_1.jpg' result = ocr.ocr(img_path, det=False) for line in result: print(line) ``` + 结果是一个list,每个item只包含识别结果和识别置信度 + ```bash ['韩国小馆', 0.9907421] ``` * 单独执行方向分类器 + ```python from paddleocr import PaddleOCR -ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory + +ocr = PaddleOCR(use_angle_cls=True) # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_words/ch/word_1.jpg' result = ocr.ocr(img_path, det=False, rec=False, cls=True) for line in result: print(line) ``` + 结果是一个list,每个item只包含分类结果和分类置信度 + ```bash ['0', 0.9999924] ``` @@ -163,15 +194,19 @@ for line in result: ### 2.2 通过命令行使用 查看帮助信息 + ```bash paddleocr -h ``` * 检测+方向分类器+识别全流程 + ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --use_angle_cls true ``` + 结果是一个list,每个item包含了文本框,文字和识别置信度 + ```bash [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]] [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]] @@ -180,10 +215,13 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --use_angle_cls true ``` * 检测+识别 + ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg ``` + 结果是一个list,每个item包含了文本框,文字和识别置信度 + ```bash [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]] [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]] @@ -192,20 +230,25 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg ``` * 方向分类器+识别 + ```bash paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls true --det false ``` 结果是一个list,每个item只包含识别结果和识别置信度 + ```bash ['韩国小馆', 0.9907421] ``` * 单独执行检测 + ```bash paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --rec false ``` + 结果是一个list,每个item只包含文本框 + ```bash [[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]] [[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]] @@ -214,34 +257,42 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --rec false ``` * 单独执行识别 + ```bash paddleocr --image_dir 
PaddleOCR/doc/imgs_words/ch/word_1.jpg --det false ``` 结果是一个list,每个item只包含识别结果和识别置信度 + ```bash ['韩国小馆', 0.9907421] ``` * 单独执行方向分类器 + ```bash paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --use_angle_cls true --det false --rec false ``` 结果是一个list,每个item只包含分类结果和分类置信度 + ```bash ['0', 0.9999924] ``` ## 3 自定义模型 -当内置模型无法满足需求时,需要使用到自己训练的模型。 -首先,参照[inference.md](./inference.md) 第一节转换将检测、分类和识别模型转换为inference模型,然后按照如下方式使用 + +当内置模型无法满足需求时,需要使用到自己训练的模型。 首先,参照[inference.md](./inference.md) 第一节转换将检测、分类和识别模型转换为inference模型,然后按照如下方式使用 ### 3.1 代码使用 + ```python from paddleocr import PaddleOCR, draw_ocr + # 模型路径下必须含有model和params文件 -ocr = PaddleOCR(det_model_dir='{your_det_model_dir}', rec_model_dir='{your_rec_model_dir}', rec_char_dict_path='{your_rec_char_dict_path}', cls_model_dir='{your_cls_model_dir}', use_angle_cls=True) +ocr = PaddleOCR(det_model_dir='{your_det_model_dir}', rec_model_dir='{your_rec_model_dir}', + rec_char_dict_path='{your_rec_char_dict_path}', cls_model_dir='{your_cls_model_dir}', + use_angle_cls=True) img_path = 'PaddleOCR/doc/imgs/11.jpg' result = ocr.ocr(img_path, cls=True) for line in result: @@ -249,6 +300,7 @@ for line in result: # 显示结果 from PIL import Image + image = Image.open(img_path).convert('RGB') boxes = [line[0] for line in result] txts = [line[1][0] for line in result] @@ -269,11 +321,13 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --det_model_dir {your_det_model_ ### 4.1 网络图片 - 代码使用 + ```python -from paddleocr import PaddleOCR, draw_ocr +from paddleocr import PaddleOCR, draw_ocr, download_with_progressbar + # Paddleocr目前支持中英文、英文、法语、德语、韩语、日语,可以通过修改lang参数进行切换 # 参数依次为`ch`, `en`, `french`, `german`, `korean`, `japan`。 -ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory +ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory img_path = 'http://n.sinaimg.cn/ent/transform/w630h933/20171222/o111-fypvuqf1838418.jpg' result = ocr.ocr(img_path, cls=True) for line in result: @@ -281,7 +335,9 @@ for line in result: # 显示结果 from PIL import Image -image = Image.open(img_path).convert('RGB') + +download_with_progressbar(img_path, 'tmp.jpg') +image = Image.open('tmp.jpg').convert('RGB') boxes = [line[0] for line in result] txts = [line[1][0] for line in result] scores = [line[1][1] for line in result] @@ -289,18 +345,24 @@ im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` + - 命令行模式 + ```bash paddleocr --image_dir http://n.sinaimg.cn/ent/transform/w630h933/20171222/o111-fypvuqf1838418.jpg --use_angle_cls=true ``` ### 4.2 numpy数组 + 仅通过代码使用时支持numpy数组作为输入 + ```python +import cv2 from paddleocr import PaddleOCR, draw_ocr + # Paddleocr目前支持中英文、英文、法语、德语、韩语、日语,可以通过修改lang参数进行切换 # 参数依次为`ch`, `en`, `french`, `german`, `korean`, `japan`。 -ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory +ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' img = cv2.imread(img_path) # img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY), 如果你自己训练的模型支持灰度图,可以将这句话的注释取消 @@ -310,6 +372,7 @@ for line in result: # 显示结果 from PIL import Image + image = Image.open(img_path).convert('RGB') boxes = [line[0] for line in result] txts = [line[1][0] for line in result] @@ -356,3 +419,4 @@ im_show.save('result.jpg') | rec | 前向时是否启动识别 | TRUE | | cls | 前向时是否启动分类 
(命令行模式下使用use_angle_cls控制前向是否启动分类) | FALSE | | show_log | 是否打印det和rec等信息 | FALSE | +| type | 执行ocr或者表格结构化, 值可选['ocr','structure'] | ocr | diff --git a/doc/doc_en/whl_en.md b/doc/doc_en/whl_en.md index b9909f498e830309eaad952df9171cd63b6f5e7b..c8c8353accdf7f6ce179d3700547bfe9bd70c200 100644 --- a/doc/doc_en/whl_en.md +++ b/doc/doc_en/whl_en.md @@ -305,7 +305,8 @@ paddleocr --image_dir http://n.sinaimg.cn/ent/transform/w630h933/20171222/o111-f Support numpy array as input only when used by code ```python -from paddleocr import PaddleOCR, draw_ocr +import cv2 +from paddleocr import PaddleOCR, draw_ocr, download_with_progressbar ocr = PaddleOCR(use_angle_cls=True, lang="ch") # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' img = cv2.imread(img_path) @@ -316,7 +317,9 @@ for line in result: # show result from PIL import Image -image = Image.open(img_path).convert('RGB') + +download_with_progressbar(img_path, 'tmp.jpg') +image = Image.open('tmp.jpg').convert('RGB') boxes = [line[0] for line in result] txts = [line[1][0] for line in result] scores = [line[1][1] for line in result] @@ -362,5 +365,5 @@ im_show.save('result.jpg') | det | Enable detction when `ppocr.ocr` func exec | TRUE | | rec | Enable recognition when `ppocr.ocr` func exec | TRUE | | cls | Enable classification when `ppocr.ocr` func exec((Use use_angle_cls in command line mode to control whether to start classification in the forward direction) | FALSE | -| show_log | Whether to print log in det and rec - | FALSE | \ No newline at end of file +| show_log | Whether to print log in det and rec | FALSE | +| type | Perform ocr or table structuring, the value is selected in ['ocr','structure'] | ocr | \ No newline at end of file diff --git a/doc/table/ppstructure.GIF b/doc/table/ppstructure.GIF new file mode 100644 index 0000000000000000000000000000000000000000..bff836e3ea53d447c948309de56ac5d2ad553264 Binary files /dev/null and b/doc/table/ppstructure.GIF differ diff --git a/doc/table/table.jpg b/doc/table/table.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3daa619e52dc2471df62ea7767be3bff350b623f Binary files /dev/null and b/doc/table/table.jpg differ diff --git a/paddleocr.py b/paddleocr.py index f2a3496897c07f8d969b198441076ef992774c19..c52737f55b61cd29c08367adb6d7e05c561e933e 100644 --- a/paddleocr.py +++ b/paddleocr.py @@ -29,16 +29,19 @@ from ppocr.utils.logging import get_logger logger = get_logger() from ppocr.utils.utility import check_and_read_gif, get_image_file_list from ppocr.utils.network import maybe_download, download_with_progressbar, is_link, confirm_model_dir_url -from tools.infer.utility import draw_ocr, init_args, str2bool +from tools.infer.utility import draw_ocr, str2bool +from ppstructure.utility import init_args, draw_structure_result +from ppstructure.predict_system import OCRSystem, save_structure_res -__all__ = ['PaddleOCR'] +__all__ = ['PaddleOCR', 'PPStructure', 'draw_ocr', 'draw_structure_result', 'save_structure_res','download_with_progressbar'] model_urls = { 'det': { 'ch': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar', 'en': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar' + 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/en_ppocr_mobile_v2.0_det_infer.tar', + 'structure': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar' }, 'rec': { 'ch': { @@ -110,14 +113,21 @@ model_urls = { 'url': 
'https://paddleocr.bj.bcebos.com/dygraph_v2.0/multilingual/devanagari_ppocr_mobile_v2.0_rec_infer.tar', 'dict_path': './ppocr/utils/dict/devanagari_dict.txt' + }, + 'structure': { + 'url': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_dict.txt' } }, - 'cls': - 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar' + 'cls': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar', + 'table': { + 'url': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar', + 'dict_path': 'ppocr/utils/dict/table_structure_dict.txt' + } } SUPPORT_DET_MODEL = ['DB'] -VERSION = '2.1' +VERSION = '2.2' SUPPORT_REC_MODEL = ['CRNN'] BASE_DIR = os.path.expanduser("~/.paddleocr/") @@ -129,9 +139,10 @@ def parse_args(mMain=True): parser.add_argument("--lang", type=str, default='ch') parser.add_argument("--det", type=str2bool, default=True) parser.add_argument("--rec", type=str2bool, default=True) + parser.add_argument("--type", type=str, default='ocr') for action in parser._actions: - if action.dest == 'rec_char_dict_path': + if action.dest in ['rec_char_dict_path', 'table_char_dict_path']: action.default = None if mMain: return parser.parse_args() @@ -142,6 +153,42 @@ def parse_args(mMain=True): return argparse.Namespace(**inference_args_dict) +def parse_lang(lang): + latin_lang = [ + 'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', + 'hr', 'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', + 'mt', 'nl', 'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', + 'sl', 'sq', 'sv', 'sw', 'tl', 'tr', 'uz', 'vi' + ] + arabic_lang = ['ar', 'fa', 'ug', 'ur'] + cyrillic_lang = [ + 'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', + 'ava', 'dar', 'inh', 'che', 'lbe', 'lez', 'tab' + ] + devanagari_lang = [ + 'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', + 'gom', 'sa', 'bgc' + ] + if lang in latin_lang: + lang = "latin" + elif lang in arabic_lang: + lang = "arabic" + elif lang in cyrillic_lang: + lang = "cyrillic" + elif lang in devanagari_lang: + lang = "devanagari" + assert lang in model_urls[ + 'rec'], 'param lang must in {}, but got {}'.format( + model_urls['rec'].keys(), lang) + if lang == "ch": + det_lang = "ch" + elif lang == 'structure': + det_lang = 'structure' + else: + det_lang = "en" + return lang, det_lang + + class PaddleOCR(predict_system.TextSystem): def __init__(self, **kwargs): """ @@ -154,52 +201,17 @@ class PaddleOCR(predict_system.TextSystem): if not params.show_log: logger.setLevel(logging.INFO) self.use_angle_cls = params.use_angle_cls - lang = params.lang - latin_lang = [ - 'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', - 'hr', 'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', - 'mt', 'nl', 'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', - 'sl', 'sq', 'sv', 'sw', 'tl', 'tr', 'uz', 'vi' - ] - arabic_lang = ['ar', 'fa', 'ug', 'ur'] - cyrillic_lang = [ - 'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', - 'ava', 'dar', 'inh', 'che', 'lbe', 'lez', 'tab' - ] - devanagari_lang = [ - 'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', - 'gom', 'sa', 'bgc' - ] - if lang in latin_lang: - lang = "latin" - elif lang in arabic_lang: - lang = "arabic" - elif lang in cyrillic_lang: - lang = "cyrillic" - elif lang in devanagari_lang: - lang = "devanagari" - assert lang in model_urls[ - 'rec'], 'param lang 
must in {}, but got {}'.format( - model_urls['rec'].keys(), lang) - if lang == "ch": - det_lang = "ch" - else: - det_lang = "en" - use_inner_dict = False - if params.rec_char_dict_path is None: - use_inner_dict = True - params.rec_char_dict_path = model_urls['rec'][lang][ - 'dict_path'] + lang, det_lang = parse_lang(params.lang) # init model dir params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, - os.path.join(BASE_DIR, VERSION, 'det', det_lang), + os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), model_urls['det'][det_lang]) params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, - os.path.join(BASE_DIR, VERSION, 'rec', lang), + os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), model_urls['rec'][lang]['url']) params.cls_model_dir, cls_url = confirm_model_dir_url(params.cls_model_dir, - os.path.join(BASE_DIR, VERSION, 'cls'), + os.path.join(BASE_DIR, VERSION, 'ocr', 'cls'), model_urls['cls']) # download model maybe_download(params.det_model_dir, det_url) @@ -212,9 +224,9 @@ class PaddleOCR(predict_system.TextSystem): if params.rec_algorithm not in SUPPORT_REC_MODEL: logger.error('rec_algorithm must in {}'.format(SUPPORT_REC_MODEL)) sys.exit(0) - if use_inner_dict: - params.rec_char_dict_path = str( - Path(__file__).parent / params.rec_char_dict_path) + + if params.rec_char_dict_path is None: + params.rec_char_dict_path = str(Path(__file__).parent / model_urls['rec'][lang]['dict_path']) print(params) # init det_model and rec_model @@ -272,6 +284,59 @@ class PaddleOCR(predict_system.TextSystem): return rec_res +class PPStructure(OCRSystem): + def __init__(self, **kwargs): + params = parse_args(mMain=False) + params.__dict__.update(**kwargs) + if not params.show_log: + logger.setLevel(logging.INFO) + lang, det_lang = parse_lang(params.lang) + + # init model dir + params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'det', det_lang), + model_urls['det'][det_lang]) + params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'rec', lang), + model_urls['rec'][lang]['url']) + params.table_model_dir, table_url = confirm_model_dir_url(params.table_model_dir, + os.path.join(BASE_DIR, VERSION, 'ocr', 'table'), + model_urls['table']['url']) + # download model + maybe_download(params.det_model_dir, det_url) + maybe_download(params.rec_model_dir, rec_url) + maybe_download(params.table_model_dir, table_url) + + if params.rec_char_dict_path is None: + params.rec_char_dict_path = str(Path(__file__).parent / model_urls['rec'][lang]['dict_path']) + if params.table_char_dict_path is None: + params.table_char_dict_path = str(Path(__file__).parent / model_urls['table']['dict_path']) + + print(params) + super().__init__(params) + + def __call__(self, img): + if isinstance(img, str): + # download net image + if img.startswith('http'): + download_with_progressbar(img, 'tmp.jpg') + img = 'tmp.jpg' + image_file = img + img, flag = check_and_read_gif(image_file) + if not flag: + with open(image_file, 'rb') as f: + np_arr = np.frombuffer(f.read(), dtype=np.uint8) + img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) + if img is None: + logger.error("error in loading image:{}".format(image_file)) + return None + if isinstance(img, np.ndarray) and len(img.shape) == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) + + res = super().__call__(img) + return res + + def main(): # for cmd args = parse_args(mMain=True) @@ -284,14 +349,29 @@ def main(): if 
len(image_file_list) == 0: logger.error('no images find in {}'.format(args.image_dir)) return + if args.type == 'ocr': + engine = PaddleOCR(**(args.__dict__)) + elif args.type == 'structure': + engine = PPStructure(**(args.__dict__)) + else: + raise NotImplementedError - ocr_engine = PaddleOCR(**(args.__dict__)) for img_path in image_file_list: + img_name = os.path.basename(img_path).split('.')[0] logger.info('{}{}{}'.format('*' * 10, img_path, '*' * 10)) - result = ocr_engine.ocr(img_path, + if args.type == 'ocr': + result = engine.ocr(img_path, det=args.det, rec=args.rec, cls=args.use_angle_cls) - if result is not None: - for line in result: - logger.info(line) + if result is not None: + for line in result: + logger.info(line) + elif args.type == 'structure': + result = engine(img_path) + save_structure_res(result, args.output, img_name) + + for item in result: + item.pop('img') + logger.info(item) + diff --git a/ppocr/data/imaug/label_ops.py b/ppocr/data/imaug/label_ops.py index e25cce79b553f127afc0167f18b6f663ceb617d7..d222c4109c3723bc1adb71ee7c21a27a010f8f45 100644 --- a/ppocr/data/imaug/label_ops.py +++ b/ppocr/data/imaug/label_ops.py @@ -19,6 +19,7 @@ from __future__ import unicode_literals import numpy as np import string +import json class ClsLabelEncode(object): @@ -39,7 +40,6 @@ class DetLabelEncode(object): pass def __call__(self, data): - import json label = data['label'] label = json.loads(label) nBox = len(label) @@ -53,6 +53,8 @@ class DetLabelEncode(object): txt_tags.append(True) else: txt_tags.append(False) + if len(boxes) == 0: + return None boxes = self.expand_points_num(boxes) boxes = np.array(boxes, dtype=np.float32) txt_tags = np.array(txt_tags, dtype=np.bool) @@ -352,19 +354,22 @@ class SRNLabelEncode(BaseRecLabelEncode): % beg_or_end return idx + class TableLabelEncode(object): """ Convert between text-label and text-index """ - def __init__(self, - max_text_length, - max_elem_length, - max_cell_num, - character_dict_path, - span_weight = 1.0, - **kwargs): + + def __init__(self, + max_text_length, + max_elem_length, + max_cell_num, + character_dict_path, + span_weight=1.0, + **kwargs): self.max_text_length = max_text_length self.max_elem_length = max_elem_length self.max_cell_num = max_cell_num - list_character, list_elem = self.load_char_elem_dict(character_dict_path) + list_character, list_elem = self.load_char_elem_dict( + character_dict_path) list_character = self.add_special_char(list_character) list_elem = self.add_special_char(list_elem) self.dict_character = {} @@ -374,36 +379,37 @@ class TableLabelEncode(object): for i, elem in enumerate(list_elem): self.dict_elem[elem] = i self.span_weight = span_weight - + def load_char_elem_dict(self, character_dict_path): list_character = [] list_elem = [] with open(character_dict_path, "rb") as fin: lines = fin.readlines() - substr = lines[0].decode('utf-8').strip("\n").split("\t") + substr = lines[0].decode('utf-8').strip("\r\n").split("\t") character_num = int(substr[0]) elem_num = int(substr[1]) - for cno in range(1, 1+character_num): - character = lines[cno].decode('utf-8').strip("\n") + + for cno in range(1, 1 + character_num): + character = lines[cno].decode('utf-8').strip("\r\n") list_character.append(character) - for eno in range(1+character_num, 1+character_num+elem_num): - elem = lines[eno].decode('utf-8').strip("\n") + for eno in range(1 + character_num, 1 + character_num + elem_num): + elem = lines[eno].decode('utf-8').strip("\r\n") list_elem.append(elem) return list_character, list_elem - + def 
add_special_char(self, list_character):
         self.beg_str = "sos"
         self.end_str = "eos"
         list_character = [self.beg_str] + list_character + [self.end_str]
         return list_character
-
+
     def get_span_idx_list(self):
         span_idx_list = []
         for elem in self.dict_elem:
             if 'span' in elem:
                 span_idx_list.append(self.dict_elem[elem])
         return span_idx_list
-
+
     def __call__(self, data):
         cells = data['cells']
         structure = data['structure']['tokens']
@@ -412,18 +418,22 @@
             return None
         elem_num = len(structure)
         structure = [0] + structure + [len(self.dict_elem) - 1]
-        structure = structure + [0] * (self.max_elem_length + 2 - len(structure))
+        structure = structure + [0] * (self.max_elem_length + 2 - len(structure)
+                                       )
         structure = np.array(structure)
         data['structure'] = structure
         elem_char_idx1 = self.dict_elem['<td>']
         elem_char_idx2 = self.dict_elem['<td']
         span_idx_list = self.get_span_idx_list()
         td_idx_list = np.logical_or(structure == elem_char_idx1,
                                     structure == elem_char_idx2)
         td_idx_list = np.where(td_idx_list)[0]
         if len(span_idx_list) > 0:
             span_weight = len(td_idx_list) * 1.0 / len(span_idx_list)
@@ -450,9 +460,11 @@ class TableLabelEncode(object):
         char_end_idx = self.get_beg_end_flag_idx('end', 'char')
         elem_beg_idx = self.get_beg_end_flag_idx('beg', 'elem')
         elem_end_idx = self.get_beg_end_flag_idx('end', 'elem')
-        data['sp_tokens'] = np.array([char_beg_idx, char_end_idx, elem_beg_idx,
-            elem_end_idx, elem_char_idx1, elem_char_idx2, self.max_text_length,
-            self.max_elem_length, self.max_cell_num, elem_num])
+        data['sp_tokens'] = np.array([
+            char_beg_idx, char_end_idx, elem_beg_idx, elem_end_idx,
+            elem_char_idx1, elem_char_idx2, self.max_text_length,
+            self.max_elem_length, self.max_cell_num, elem_num
+        ])
         return data
 
     def encode(self, text, char_or_elem):
@@ -504,9 +516,8 @@ class TableLabelEncode(object):
             idx = np.array(self.dict_elem[self.end_str])
         else:
             assert False, "Unsupport type %s in get_beg_end_flag_idx of elem" \
-                % beg_or_end
+                          % beg_or_end
     else:
         assert False, "Unsupport type %s in char_or_elem" \
-            % char_or_elem
+                      % char_or_elem
     return idx
-
\ No newline at end of file
diff --git a/ppstructure/MANIFEST.in b/ppstructure/MANIFEST.in
deleted file mode 100644
index 713e4b06f3ac924070afe53de9c2ec48726185e6..0000000000000000000000000000000000000000
--- a/ppstructure/MANIFEST.in
+++ /dev/null
@@ -1,9 +0,0 @@
-include LICENSE
-include README.md
-
-recursive-include ppocr/utils *.txt utility.py logging.py network.py
-recursive-include ppocr/data *.py
-recursive-include ppocr/postprocess *.py
-recursive-include tools/infer *.py
-recursive-include ppstructure *.py
-
diff --git a/ppstructure/README.md b/ppstructure/README.md
index 90cd412df038a59ab6555b1ff632f99e2d32bb74..8e1642cc75cc52b179d0f8441a8da2fe86e78d7b 100644
--- a/ppstructure/README.md
+++ b/ppstructure/README.md
@@ -1,84 +1,116 @@
-# PaddleStructure
+English | [简体中文](README_ch.md)
 
-PaddleStructure is an OCR toolkit for complex layout analysis. It can divide document data in the form of pictures into **text, table, title, picture and list** 5 types of areas, and extract the table area as excel
-## 1. Quick start
+# PP-Structure
 
-### install
+PP-Structure is an OCR toolkit that can be used for complex document analysis. 
The main features are as follows:
+- Support layout analysis of documents, dividing a document into 5 types of areas: **text, title, table, image and list** (used in conjunction with Layout-Parser)
+- Support extracting the text from the text, title, picture and list areas (used in conjunction with PP-OCR)
+- Support extracting excel files from the table areas
+- Support python whl package and command line usage, easy to use
+- Support custom training for the layout analysis and table structure tasks
 
-**install layoutparser**
-```sh
+## 1. Visualization
+
+
+
+
+
+## 2. Installation
+
+### 2.1 Install requirements
+
+- **(1) Install PaddlePaddle**
+
+```bash
+pip3 install --upgrade pip
+
+# GPU
+python3 -m pip install paddlepaddle-gpu==2.1.1 -i https://mirror.baidu.com/pypi/simple
+
+# CPU
+python3 -m pip install paddlepaddle==2.1.1 -i https://mirror.baidu.com/pypi/simple
+
+# For more installation options, refer to [Installation](https://www.paddlepaddle.org.cn/install/quick).
+```
+
+- **(2) Install Layout-Parser**
+
+```bash
 pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl
 ```
 
-**install paddlestructure**
-install by pypi
+### 2.2 Install PaddleOCR (including PP-OCR and PP-Structure)
+
+- **(1) PIP install PaddleOCR whl package (inference only)**
 
 ```bash
-pip install paddlestructure
+pip install "paddleocr>=2.2"
 ```
 
-build own whl package and install
+- **(2) Clone PaddleOCR (inference + training)**
+
 ```bash
-python3 setup.py bdist_wheel
-pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x is the version of paddlestructure
+git clone https://github.com/PaddlePaddle/PaddleOCR
 ```
 
-### 1.2 Use
-#### 1.2.1 Use by command line
+## 3. Quick Start
+
+### 3.1 Use by command line
 
 ```bash
-paddlestructure --image_dir=../doc/table/1.png
+paddleocr --image_dir=../doc/table/1.png --type=structure
 ```
 
-#### 1.2.2 Use by code
+### 3.2 Use by python API
 
 ```python
 import os
 import cv2
-from paddlestructure import PaddleStructure,draw_result,save_res
+from paddleocr import PPStructure, draw_structure_result, save_structure_res
 
-table_engine = PaddleStructure(show_log=True)
+table_engine = PPStructure(show_log=True)
 
 save_folder = './output/table'
 img_path = '../doc/table/1.png'
 img = cv2.imread(img_path)
 result = table_engine(img)
-save_res(result, save_folder,os.path.basename(img_path).split('.')[0])
+save_structure_res(result, save_folder, os.path.basename(img_path).split('.')[0])
 
 for line in result:
+    line.pop('img')
     print(line)
 
 from PIL import Image
 
-font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包
+font_path = '../doc/fonts/simfang.ttf'
 image = Image.open(img_path).convert('RGB')
-im_show = draw_result(image, result,font_path=font_path)
+im_show = draw_structure_result(image, result, font_path=font_path)
 im_show = Image.fromarray(im_show)
 im_show.save('result.jpg')
 ```
 
-#### 1.2.3 返回结果说明
-The return result of PaddleStructure is a list composed of a dict, an example is as follows
+### 3.3 Returned results format
+
+The returned result of PP-Structure is a list of dicts; an example is as follows:
 
 ```shell
 [
-  { 'type': 'Text',
-    'bbox': [34, 432, 345, 462],
-    'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]],
+  { 'type': 'Text',
+    'bbox': [34, 432, 345, 462],
+    'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]],
+            [('Tigure-6. 
The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)])
   }
 ]
 ```
 
 The description of each field in dict is as follows:
 
 | Parameter | Description |
 | --------------- | -------------|
 |type|Type of image area|
 |bbox|The coordinates of the image area in the original image, given as [upper left x, upper left y, lower right x, lower right y]|
 |res|OCR or table recognition result of the image area.<br>
Table: HTML string of the table;<br>
OCR: A tuple containing the detection coordinates and recognition results of each single line of text|
 
-#### 1.2.4 Parameter Description:
+### 3.4 Parameter description:
 
 | Parameter | Description | Default value |
 | --------------- | ---------------------------------------- | ------------------------------------------- |
 
 Most of the parameters are consistent with the paddleocr whl package, see [doc of whl](../doc/doc_en/whl_en.md)
 
-After running, each image will have a directory with the same name under the directory specified in the output field. Each table in the picture will be stored as an excel, and the excel file name will be the coordinates of the table in the image.
+After running, each image will have a directory with the same name under the directory specified in the `output` field. Each table in the image is stored as an excel file and each figure area is cropped and saved as an image; the excel and image file names are the coordinates of the table or figure in the original image.
 
-## 2. PaddleStructure Pipeline
+## 4. PP-Structure Pipeline
 
 The process is as follows:
 ![pipeline](../doc/table/pipeline_en.jpg)
 
-In PaddleStructure, the image will be analyzed by layoutparser first. In the layout analysis, the area in the image will be classified, including **text, title, image, list and table** 5 categories. For the first 4 types of areas, directly use the PP-OCR to complete the text detection and recognition. The table area will be converted to an excel file of the same table style via Table OCR.
+In PP-Structure, the image is first analyzed by layoutparser. In the layout analysis, the areas in the image are classified into 5 categories: **text, title, image, list and table**. For the first 4 types of areas, PP-OCR is used directly to complete the text detection and recognition. The table area is converted to an excel file with the same table style via table recognition.
 
-### 2.1 LayoutParser
+### 4.1 LayoutParser
 
 Layout analysis divides the document data into regions, including the use of Python scripts for layout analysis tools, extraction of special category detection boxes, performance indicators, and custom training layout analysis models. For details, please refer to [document](layout/README_en.md).
 
-### 2.2 Table OCR
+### 4.2 Table Recognition
 
-Table OCR converts table image into excel documents, which include the detection and recognition of table text and the prediction of table structure and cell coordinates. For detailed, please refer to [document](table/README.md)
+Table recognition converts a table image into an excel document, including the detection and recognition of the table text and the prediction of the table structure and cell coordinates. For details, please refer to [document](table/README.md)
 
-## 3. Predictive by inference engine
+## 5. Prediction by inference engine
 
 Use the following commands to complete the inference. 
-```python
+```bash
-python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
+cd PaddleOCR/ppstructure
+
+# download model
+mkdir inference && cd inference
+# Download the detection model of the ultra-lightweight Chinese OCR model and uncompress it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
+# Download the recognition model of the ultra-lightweight Chinese OCR model and uncompress it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
+# Download the ultra-lightweight English table structure model and uncompress it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
+cd ..
+
+python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
 ```
 
 After running, each image will have a directory with the same name under the directory specified in the `output` field. Each table in the image is stored as an excel file and each figure area is cropped and saved as an image; the excel and image file names are the coordinates of the table or figure in the original image. 
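To sanity-check the exported tables, here is a minimal read-back sketch — assuming the `../output/table/1/` directory layout produced by the command above, one `.xlsx` file per detected table, and `pandas` plus `openpyxl` installed:

```python
import glob

import pandas as pd

# each detected table in ../doc/table/1.png is exported as one excel file;
# the file name encodes the table's coordinates in the original image
for xlsx_path in sorted(glob.glob('../output/table/1/*.xlsx')):
    df = pd.read_excel(xlsx_path)
    print(xlsx_path, df.shape)
```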
**Model List**
 
 |model name|description|config|model size|download|
 | --- | --- | --- | --- | --- |
-|en_ppocr_mobile_v2.0_table_det|Text detection in English table scene|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
-|en_ppocr_mobile_v2.0_table_rec|Text recognition in English table scene|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
-|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenarios|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
\ No newline at end of file
+|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenarios|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
+
+LayoutParser model
+
+|model name|description|download|
+| --- | --- | --- |
+| ppyolov2_r50vd_dcn_365e_publaynet | The layout analysis model trained on the PubLayNet dataset can divide a document into 5 types of areas: **text, title, table, picture and list** | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) |
+| ppyolov2_r50vd_dcn_365e_tableBank_word | The layout analysis model trained on the TableBank Word dataset can only detect tables | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) |
+| ppyolov2_r50vd_dcn_365e_tableBank_latex | The layout analysis model trained on the TableBank Latex dataset can only detect tables | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) |
+
+OCR and table recognition model
+
+|model name|description|model size|download|
+| --- | --- | --- | --- |
+|ch_ppocr_mobile_slim_v2.0_det|Slim pruned lightweight model, supporting Chinese, English and multilingual text detection|2.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) |
+|ch_ppocr_mobile_slim_v2.0_rec|Slim pruned and quantized lightweight model, supporting Chinese, English and number recognition|6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) |
+|en_ppocr_mobile_v2.0_table_det|Text detection for English table scenes, trained on the PubLayNet dataset|4.7M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
+|en_ppocr_mobile_v2.0_table_rec|Text recognition for English table scenes, trained on the PubLayNet dataset|6.9M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
+|en_ppocr_mobile_v2.0_table_structure|Table structure prediction for English table scenes, trained on the PubLayNet dataset|18.6M|[inference model](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
+
+If you need to use other models, you can download the model from [model_list](../doc/doc_en/models_list_en.md) or use your own trained model to configure it to the three 
fields of `det_model_dir`, `rec_model_dir`, `table_model_dir` . diff --git a/ppstructure/README_ch.md b/ppstructure/README_ch.md index 7ae55534309ab48caecf8de1ae20c0536b49823e..c8acac590039647cf52f47b16a99092ff68f2b6e 100644 --- a/ppstructure/README_ch.md +++ b/ppstructure/README_ch.md @@ -1,85 +1,122 @@ -# PaddleStructure +[English](README.md) | 简体中文 -PaddleStructure是一个用于复杂版面分析的OCR工具包,其能够对图片形式的文档数据划分**文字、表格、标题、图片以及列表**5类区域,并将表格区域提取为excel +# PP-Structure -## 1. 快速开始 +PP-Structure是一个可用于复杂文档结构分析和处理的OCR工具包,主要特性如下: +- 支持对图片形式的文档进行版面分析,可以划分**文字、标题、表格、图片以及列表**5类区域(与Layout-Parser联合使用) +- 支持文字、标题、图片以及列表区域提取为文字字段(与PP-OCR联合使用) +- 支持表格区域进行结构化分析,最终结果输出Excel文件 +- 支持python whl包和命令行两种方式,简单易用 +- 支持版面分析和表格结构化两类任务自定义训练 -### 1.1 安装 +## 1. 效果展示 -**安装 layoutparser** -```sh + + + + +## 2. 安装 + +### 2.1 安装依赖 + +- **(1) 安装PaddlePaddle** + +```bash +pip3 install --upgrade pip + +# GPU安装 +python3 -m pip install paddlepaddle-gpu==2.1.1 -i https://mirror.baidu.com/pypi/simple + +# CPU安装 + python3 -m pip install paddlepaddle==2.1.1 -i https://mirror.baidu.com/pypi/simple + +# 更多需求,请参照[安装文档](https://www.paddlepaddle.org.cn/install/quick)中的说明进行操作。 +``` + +- **(2) 安装 Layout-Parser** + +```bash pip3 install -U premailer paddleocr https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl ``` -**安装 paddlestructure** -pip安装 +### 2.2 安装PaddleOCR(包含PP-OCR和PP-Structure) + +- **(1) PIP快速安装PaddleOCR whl包(仅预测)** + ```bash -pip install paddlestructure +pip install "paddleocr>=2.2" # 推荐使用2.2+版本 ``` -本地构建并安装 +- **(2) 完整克隆PaddleOCR源码(预测+训练)** + ```bash -python3 setup.py bdist_wheel -pip3 install dist/paddlestructure-x.x.x-py3-none-any.whl # x.x.x是 paddlestructure 的版本号 +【推荐】git clone https://github.com/PaddlePaddle/PaddleOCR + +#如果因为网络问题无法pull成功,也可选择使用码云上的托管: +git clone https://gitee.com/paddlepaddle/PaddleOCR + +#注:码云托管代码可能无法实时同步本github项目更新,存在3~5天延时,请优先使用推荐方式。 ``` -### 1.2 PaddleStructure whl包使用 -#### 1.2.1 命令行使用 +## 3. PP-Structure 快速开始 + +### 3.1 命令行使用(默认参数,极简) ```bash -paddlestructure --image_dir=../doc/table/1.png +paddleocr --image_dir=../doc/table/1.png --type=structure ``` -#### 1.2.2 Python脚本使用 +### 3.2 Python脚本使用(自定义参数,灵活) ```python import os import cv2 -from paddlestructure import PaddleStructure,draw_result,save_res +from paddleocr import PPStructure,draw_structure_result,save_structure_res -table_engine = PaddleStructure(show_log=True) +table_engine = PPStructure(show_log=True) save_folder = './output/table' img_path = '../doc/table/1.png' img = cv2.imread(img_path) result = table_engine(img) -save_res(result, save_folder,os.path.basename(img_path).split('.')[0]) +save_structure_res(result, save_folder,os.path.basename(img_path).split('.')[0]) for line in result: + line.pop('img') print(line) from PIL import Image font_path = '../doc/fonts/simfang.ttf' # PaddleOCR下提供字体包 image = Image.open(img_path).convert('RGB') -im_show = draw_result(image, result,font_path=font_path) +im_show = draw_structure_result(image, result,font_path=font_path) im_show = Image.fromarray(im_show) im_show.save('result.jpg') ``` -#### 1.2.3 返回结果说明 -PaddleStructure 的返回结果为一个dict组成的list,示例如下 +### 3.3 返回结果说明 +PP-Structure的返回结果为一个dict组成的list,示例如下 ```shell [ - { 'type': 'Text', - 'bbox': [34, 432, 345, 462], - 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]], + { 'type': 'Text', + 'bbox': [34, 432, 345, 462], + 'res': ([[36.0, 437.0, 341.0, 437.0, 341.0, 446.0, 36.0, 447.0], [41.0, 454.0, 125.0, 453.0, 125.0, 459.0, 41.0, 460.0]], [('Tigure-6. 
The performance of CNN and IPT models using difforen', 0.90060663), ('Tent ', 0.465441)])
   }
 ]
 ```
 
 dict 里各个字段说明如下
 
 | 字段 | 说明 |
 | --------------- | -------------|
 |type|图片区域的类型|
 |bbox|图片区域在原图中的坐标,分别为[左上角x,左上角y,右下角x,右下角y]|
 |res|图片区域的OCR或表格识别结果。<br>
表格: 表格的HTML字符串;<br>
OCR: 一个包含各个单行文字的检测坐标和识别结果的元组|
 
-#### 1.2.4 参数说明
+### 3.4 参数说明
 
 | 字段 | 说明 | 默认值 |
 | --------------- | ---------------------------------------- | ------------------------------------------- |
 
 大部分参数和paddleocr whl包保持一致,见 [whl包文档](../doc/doc_ch/whl.md)
 
-运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,excel文件名为表格在图片里的坐标。
+运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片的文件名为表格在图片里的坐标。
 
-## 2. PaddleStructure Pipeline
+## 4. PP-Structure Pipeline介绍
 
-流程如下
 ![pipeline](../doc/table/pipeline.jpg)
 
-在PaddleStructure中,图片会先经由layoutparser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过Table OCR处理后,表格图片转换为相同表格样式的Excel文件。
+在PP-Structure中,图片会先经由Layout-Parser进行版面分析,在版面分析中,会对图片里的区域进行分类,包括**文字、标题、图片、列表和表格**5类。对于前4类区域,直接使用PP-OCR完成对应区域文字检测与识别。对于表格类区域,经过表格结构化处理后,表格图片转换为相同表格样式的Excel文件。
 
-### 2.1 版面分析
+### 4.1 版面分析
 
-版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README.md)。
+版面分析对文档数据进行区域分类,其中包括版面分析工具的Python脚本使用、提取指定类别检测框、性能指标以及自定义训练版面分析模型,详细内容可以参考[文档](layout/README_ch.md)。
 
-### 2.2 表格识别
+### 4.2 表格识别
 
-Table OCR将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md)
+表格识别将表格图片转换为excel文档,其中包含对于表格文本的检测和识别以及对于表格结构和单元格坐标的预测,详细说明参考[文档](table/README_ch.md)
 
-## 3. 预测引擎推理
+## 5. 预测引擎推理(与whl包效果相同)
 
 使用如下命令即可完成预测引擎的推理
 
-```python
+```bash
-python3 table/predict_system.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
+cd ppstructure
+
+# 下载模型
+mkdir inference && cd inference
+# 下载超轻量级中文OCR模型的检测模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
+# 下载超轻量级中文OCR模型的识别模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
+# 下载超轻量级英文表格结构模型并解压
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
+cd .. 
+
+python3 predict_system.py --det_model_dir=inference/ch_ppocr_mobile_v2.0_det_infer --rec_model_dir=inference/ch_ppocr_mobile_v2.0_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --output=../output/table --vis_font_path=../doc/fonts/simfang.ttf
 ```
 
-运行完成后,每张图片会output字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,excel文件名为表格在图片里的坐标。
+运行完成后,每张图片会在`output`字段指定的目录下有一个同名目录,图片里的每个表格会存储为一个excel,图片区域会被裁剪之后保存下来,excel文件和图片的文件名为表格在图片里的坐标。
 
 **Model List**
 
-|模型名称|模型简介|配置文件|推理模型大小|下载地址|
-| --- | --- | --- | --- | --- |
-|en_ppocr_mobile_v2.0_table_det|英文表格场景的文字检测|[ch_det_mv3_db_v2.0.yml](../configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml)| 4.7M |[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
-|en_ppocr_mobile_v2.0_table_rec|英文表格场景的文字识别|[rec_chinese_lite_train_v2.0.yml](../configs/rec/rec_mv3_none_bilstm_ctc.yml)|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
-|en_ppocr_mobile_v2.0_table_structure|英文表格场景的表格结构预测|[table_mv3.yml](../configs/table/table_mv3.yml)|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
\ No newline at end of file
+LayoutParser 模型
+
+|模型名称|模型简介|下载地址|
+| --- | --- | --- |
+| ppyolov2_r50vd_dcn_365e_publaynet | PubLayNet 数据集训练的版面分析模型,可以划分**文字、标题、表格、图片以及列表**5类区域 | [PubLayNet](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_publaynet.tar) |
+| ppyolov2_r50vd_dcn_365e_tableBank_word | TableBank Word 数据集训练的版面分析模型,只能检测表格 | [TableBank Word](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_word.tar) |
+| ppyolov2_r50vd_dcn_365e_tableBank_latex | TableBank Latex 数据集训练的版面分析模型,只能检测表格 | [TableBank Latex](https://paddle-model-ecology.bj.bcebos.com/model/layout-parser/ppyolov2_r50vd_dcn_365e_tableBank_latex.tar) |
+
+OCR和表格识别模型
+
+|模型名称|模型简介|推理模型大小|下载地址|
+| --- | --- | --- | --- |
+|ch_ppocr_mobile_slim_v2.0_det|slim裁剪版超轻量模型,支持中英文、多语种文本检测|2.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_infer.tar) |
+|ch_ppocr_mobile_slim_v2.0_rec|slim裁剪量化版超轻量模型,支持中英文、数字识别|6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_slim_infer.tar) |
+|en_ppocr_mobile_v2.0_table_det|PubLayNet数据集训练的英文表格场景的文字检测|4.7M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar) |
+|en_ppocr_mobile_v2.0_table_rec|PubLayNet数据集训练的英文表格场景的文字识别|6.9M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar) |
+|en_ppocr_mobile_v2.0_table_structure|PubLayNet数据集训练的英文表格场景的表格结构预测|18.6M|[推理模型](https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar) |
+
+如需要使用其他模型,可以在 [model_list](../doc/doc_ch/models_list.md) 下载模型或者使用自己训练好的模型配置到`det_model_dir`,`rec_model_dir`,`table_model_dir`三个字段即可。
diff --git a/ppstructure/__init__.py b/ppstructure/__init__.py
index 3952b5ffb9f443e9aba9ba0a4a041b73d2caa9bc..1d11e265597c7c8e39098a228108da3bb954b892 100644
--- a/ppstructure/__init__.py
+++ b/ppstructure/__init__.py
@@ -11,7 +11,3 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License. 
- -from .paddlestructure import PaddleStructure, draw_result, save_res - -__all__ = ['PaddleStructure', 'draw_result', 'save_res'] diff --git a/ppstructure/layout/README.md b/ppstructure/layout/README.md index fde6d75a4d278551aba77075c3f9789a24a01b21..74cb928e30c012d5b469d685fd63b443a7d22613 100644 --- a/ppstructure/layout/README.md +++ b/ppstructure/layout/README.md @@ -1,27 +1,31 @@ -# 版面分析使用说明 +English | [简体中文](README_ch.md) -[1. 安装whl包](#安装whl包) -[2. 使用](#使用) +# Getting Started -[3. 后处理](#后处理) +[1. Install whl package](#Install) -[4. 指标](#指标) +[2. Quick Start](#QuickStart) -[5. 训练版面分析模型](#训练版面分析模型) +[3. PostProcess](#PostProcess) - +[4. Results](#Results) -## 1. 安装whl包 +[5. Training](#Training) + + + +## 1. Install whl package ```bash -pip install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +wget https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +pip install -U layoutparser-0.0.0-py3-none-any.whl ``` - + -## 2. 使用 +## 2. Quick Start -使用layoutparser识别给定文档的布局: +Use LayoutParser to identify the layout of a document: ```python import cv2 @@ -29,41 +33,40 @@ import layoutparser as lp image = cv2.imread("doc/table/layout.jpg") image = image[..., ::-1] -# 加载模型 -model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config", +# load model +model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config", threshold=0.5, label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"}, - enforce_cpu=False, + enforce_cpu=False, enable_mkldnn=True) -# 检测 +# detect layout = model.detect(image) -# 显示结果 +# show result show_img = lp.draw_box(image, layout, box_width=3, show_element_type=True) show_img.show() ``` -下图展示了结果,不同颜色的检测框表示不同的类别,并通过`show_element_type`在框的左上角显示具体类别: +The following figure shows the result, with different colored detection boxes representing different categories and displaying specific categories in the upper left corner of the box with `show_element_type`
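Beyond drawing boxes, the detection result can also be consumed directly. A minimal sketch, assuming each block in the `layout` returned by `model.detect` above exposes `type`, `score` and `coordinates` as in the layoutparser API:

```python
# `layout` comes from model.detect(image) in the quick-start snippet above
for block in layout:
    x1, y1, x2, y2 = block.coordinates
    print(f"{block.type}: score={block.score:.2f}, "
          f"box=({x1:.0f}, {y1:.0f}, {x2:.0f}, {y2:.0f})")
```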
-
-`PaddleDetectionLayoutModel`函数参数说明如下:
-
-| 参数 | 含义 | 默认值 | 备注 |
-| :------------: | :-------------------------: | :---------: | :----------------------------------------------------------: |
-| config_path | 模型配置路径 | None | 指定config_path会自动下载模型(仅第一次,之后模型存在,不会再下载) |
-| model_path | 模型路径 | None | 本地模型路径,config_path和model_path必须设置一个,不能同时为None |
-| threshold | 预测得分的阈值 | 0.5 | \ |
-| input_shape | reshape之后图片尺寸 | [3,640,640] | \ |
-| batch_size | 测试batch size | 1 | \ |
-| label_map | 类别映射表 | None | 设置config_path时,可以为None,根据数据集名称自动获取label_map |
-| enforce_cpu | 代码是否使用CPU运行 | False | 设置为False表示使用GPU,True表示强制使用CPU |
-| enforce_mkldnn | CPU预测中是否开启MKLDNN加速 | True | \ |
-| thread_num | 设置CPU线程数 | 10 | \ |
-
-目前支持以下几种模型配置和label map,您可以通过修改 `--config_path`和 `--label_map`使用这些模型,从而检测不同类型的内容:
+`PaddleDetectionLayoutModel` parameters are described as follows:
+
+| parameter | description | default | remark |
+| :------------: | :------------------------------------------------------: | :---------: | :----------------------------------------------------------: |
+| config_path | model config path | None | Specifying config_path will automatically download the model (only the first time; after that the model exists locally and will not be downloaded again) |
+| model_path | model path | None | local model path; one of config_path and model_path must be set, they cannot both be None |
+| threshold | threshold of prediction score | 0.5 | \ |
+| input_shape | image size after reshape | [3,640,640] | \ |
+| batch_size | testing batch size | 1 | \ |
+| label_map | category mapping table | None | when config_path is set, this can be None and the label_map is obtained automatically according to the dataset name |
+| enforce_cpu | whether to use CPU | False | False to use GPU, and True to force the use of CPU |
+| enforce_mkldnn | whether MKLDNN acceleration is enabled in CPU prediction | True | \ |
+| thread_num | the number of CPU threads | 10 | \ |
+
+The following model configurations and label maps are currently supported; you can use them by modifying `--config_path` and `--label_map` to detect different types of content:
 
 | dataset | config_path | label_map |
 | ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------------------------------------------- |
 | [TableBank](https://doc-analysis.github.io/tablebank-page/index.html) word | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_word/config | {0:"Table"} |
 | TableBank latex | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_latex/config | {0:"Table"} |
 | [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) | lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config | {0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"} |
 
-* TableBank word和TableBank latex分别在word文档、latex文档数据集训练;
-* 下载的TableBank数据集里同时包含word和latex。
+* TableBank word and TableBank latex are trained on datasets of word documents and latex documents respectively;
+* The downloaded TableBank dataset contains both word and latex.
 
-
+
-## 3. 后处理
+## 3. 
PostProcess -版面分析检测包含多个类别,如果只想获取指定类别(如"Text"类别)的检测框、可以使用下述代码: +Layout parser contains multiple categories, if you only want to get the detection box for a specific category (such as the "Text" category), you can use the following code: ```python -# 接上面代码 -# 首先过滤特定文本类型的区域 +# follow the above code +# filter areas for a specific text type text_blocks = lp.Layout([b for b in layout if b.type=='Text']) figure_blocks = lp.Layout([b for b in layout if b.type=='Figure']) -# 因为在图像区域内可能检测到文本区域,所以只需要删除它们 +# text areas may be detected within the image area, delete these areas text_blocks = lp.Layout([b for b in text_blocks \ if not any(b.is_in(b_fig) for b_fig in figure_blocks)]) -# 对文本区域排序并分配id +# sort text areas and assign ID h, w = image.shape[:2] left_interval = lp.Interval(0, w/2*1.05, axis='x').put_on_canvas(image) @@ -101,40 +104,38 @@ left_blocks.sort(key = lambda b:b.coordinates[1]) right_blocks = [b for b in text_blocks if b not in left_blocks] right_blocks.sort(key = lambda b:b.coordinates[1]) -# 最终合并两个列表,并按顺序添加索引 +# the two lists are merged and the indexes are added in order text_blocks = lp.Layout([b.set(id = idx) for idx, b in enumerate(left_blocks + right_blocks)]) -# 显示结果 +# display result show_img = lp.draw_box(image, text_blocks, - box_width=3, + box_width=3, show_element_id=True) show_img.show() ``` -显示只有"Text"类别的结果: +Displays results with only the "Text" category:
+ - - -## 4. 指标 +## 4. Results | Dataset | mAP | CPU time cost | GPU time cost | | --------- | ---- | ------------- | ------------- | | PubLayNet | 93.6 | 1713.7ms | 66.6ms | | TableBank | 96.2 | 1968.4ms | 65.1ms | -**Envrionment:** - -​ **CPU:** Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz,24core +**Envrionment:** -​ **GPU:** a single NVIDIA Tesla P40 +​ **CPU:** Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz,24core - +​ **GPU:** a single NVIDIA Tesla P40 -## 5. 训练版面分析模型 + -上述模型基于[PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) 训练,如果您想训练自己的版面分析模型,请参考:[train_layoutparser_model](train_layoutparser_model.md) +## 5. Training +The above model is based on [PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection). If you want to train your own layout parser model,please refer to:[train_layoutparser_model](train_layoutparser_model.md) diff --git a/ppstructure/layout/README_ch.md b/ppstructure/layout/README_ch.md new file mode 100644 index 0000000000000000000000000000000000000000..c722e0bd88f40ff6b711edecff0433029e101f87 --- /dev/null +++ b/ppstructure/layout/README_ch.md @@ -0,0 +1,141 @@ +[English](README.md) | 简体中文 + +# 版面分析使用说明 + +[1. 安装whl包](#安装whl包) + +[2. 使用](#使用) + +[3. 后处理](#后处理) + +[4. 指标](#指标) + +[5. 训练版面分析模型](#训练版面分析模型) + + + +## 1. 安装whl包 +```bash +pip install -U https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl +``` + + + +## 2. 使用 + +使用layoutparser识别给定文档的布局: + +```python +import cv2 +import layoutparser as lp +image = cv2.imread("doc/table/layout.jpg") +image = image[..., ::-1] + +# 加载模型 +model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config", + threshold=0.5, + label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"}, + enforce_cpu=False, + enable_mkldnn=True) +# 检测 +layout = model.detect(image) + +# 显示结果 +show_img = lp.draw_box(image, layout, box_width=3, show_element_type=True) +show_img.show() +``` + +下图展示了结果,不同颜色的检测框表示不同的类别,并通过`show_element_type`在框的左上角显示具体类别: + +
+ (figure: layout detection result)
+ +`PaddleDetectionLayoutModel`函数参数说明如下: + +| 参数 | 含义 | 默认值 | 备注 | +| :------------: | :-------------------------: | :---------: | :----------------------------------------------------------: | +| config_path | 模型配置路径 | None | 指定config_path会自动下载模型(仅第一次,之后模型存在,不会再下载) | +| model_path | 模型路径 | None | 本地模型路径,config_path和model_path必须设置一个,不能同时为None | +| threshold | 预测得分的阈值 | 0.5 | \ | +| input_shape | reshape之后图片尺寸 | [3,640,640] | \ | +| batch_size | 测试batch size | 1 | \ | +| label_map | 类别映射表 | None | 设置config_path时,可以为None,根据数据集名称自动获取label_map | +| enforce_cpu | 代码是否使用CPU运行 | False | 设置为False表示使用GPU,True表示强制使用CPU | +| enforce_mkldnn | CPU预测中是否开启MKLDNN加速 | True | \ | +| thread_num | 设置CPU线程数 | 10 | \ | + +目前支持以下几种模型配置和label map,您可以通过修改 `--config_path`和 `--label_map`使用这些模型,从而检测不同类型的内容: + +| dataset | config_path | label_map | +| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------------------------------------------- | +| [TableBank](https://doc-analysis.github.io/tablebank-page/index.html) word | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_word/config | {0:"Table"} | +| TableBank latex | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_latex/config | {0:"Table"} | +| [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) | lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config | {0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"} | + +* TableBank word和TableBank latex分别在word文档、latex文档数据集训练; +* 下载的TableBank数据集里同时包含word和latex。 + + + +## 3. 后处理 + +版面分析检测包含多个类别,如果只想获取指定类别(如"Text"类别)的检测框、可以使用下述代码: + +```python +# 接上面代码 +# 首先过滤特定文本类型的区域 +text_blocks = lp.Layout([b for b in layout if b.type=='Text']) +figure_blocks = lp.Layout([b for b in layout if b.type=='Figure']) + +# 因为在图像区域内可能检测到文本区域,所以只需要删除它们 +text_blocks = lp.Layout([b for b in text_blocks \ + if not any(b.is_in(b_fig) for b_fig in figure_blocks)]) + +# 对文本区域排序并分配id +h, w = image.shape[:2] + +left_interval = lp.Interval(0, w/2*1.05, axis='x').put_on_canvas(image) + +left_blocks = text_blocks.filter_by(left_interval, center=True) +left_blocks.sort(key = lambda b:b.coordinates[1]) + +right_blocks = [b for b in text_blocks if b not in left_blocks] +right_blocks.sort(key = lambda b:b.coordinates[1]) + +# 最终合并两个列表,并按顺序添加索引 +text_blocks = lp.Layout([b.set(id = idx) for idx, b in enumerate(left_blocks + right_blocks)]) + +# 显示结果 +show_img = lp.draw_box(image, text_blocks, + box_width=3, + show_element_id=True) +show_img.show() +``` + +显示只有"Text"类别的结果: + +
+ (figure: detection result with only the "Text" category)
+ + + +## 4. 指标 + +| Dataset | mAP | CPU time cost | GPU time cost | +| --------- | ---- | ------------- | ------------- | +| PubLayNet | 93.6 | 1713.7ms | 66.6ms | +| TableBank | 96.2 | 1968.4ms | 65.1ms | + +**Envrionment:** + +​ **CPU:** Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz,24core + +​ **GPU:** a single NVIDIA Tesla P40 + + + +## 5. 训练版面分析模型 + +上述模型基于[PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) 训练,如果您想训练自己的版面分析模型,请参考:[train_layoutparser_model](train_layoutparser_model_ch.md) diff --git a/ppstructure/layout/README_en.md b/ppstructure/layout/README_en.md deleted file mode 100644 index 2d885c567b15d305602e9fb00058c4cf281de041..0000000000000000000000000000000000000000 --- a/ppstructure/layout/README_en.md +++ /dev/null @@ -1,139 +0,0 @@ -# Getting Started - -[1. Install whl package](#Install whl package) - -[2. Quick Start](#Quick Start) - -[3. PostProcess](#PostProcess) - -[4. Results](#Results) - -[5. Training](#Training) - - - -## 1. Install whl package -```bash -wget https://paddleocr.bj.bcebos.com/whl/layoutparser-0.0.0-py3-none-any.whl -pip install -U layoutparser-0.0.0-py3-none-any.whl -``` - - - -## 2. Quick Start - -Use LayoutParser to identify the layout of a given document: - -```python -import cv2 -import layoutparser as lp -image = cv2.imread("doc/table/layout.jpg") -image = image[..., ::-1] - -# load model -model = lp.PaddleDetectionLayoutModel(config_path="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config", - threshold=0.5, - label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"}, - enforce_cpu=False, - enable_mkldnn=True) -# detect -layout = model.detect(image) - -# show result -show_img = lp.draw_box(image, layout, box_width=3, show_element_type=True) -show_img.show() -``` - -The following figure shows the result, with different colored detection boxes representing different categories and displaying specific categories in the upper left corner of the box with `show_element_type` - -
- (figure: layout detection result)
-`PaddleDetectionLayoutModel`parameters are described as follows: - -| parameter | description | default | remark | -| :------------: | :------------------------------------------------------: | :---------: | :----------------------------------------------------------: | -| config_path | model config path | None | Specify config_ path will automatically download the model (only for the first time,the model will exist and will not be downloaded again) | -| model_path | model path | None | local model path, config_ path and model_ path must be set to one, cannot be none at the same time | -| threshold | threshold of prediction score | 0.5 | \ | -| input_shape | picture size of reshape | [3,640,640] | \ | -| batch_size | testing batch size | 1 | \ | -| label_map | category mapping table | None | Setting config_ path, it can be none, and the label is automatically obtained according to the dataset name_ map | -| enforce_cpu | whether to use CPU | False | False to use GPU, and True to force the use of CPU | -| enforce_mkldnn | whether mkldnn acceleration is enabled in CPU prediction | True | \ | -| thread_num | the number of CPU threads | 10 | \ | - -The following model configurations and label maps are currently supported, which you can use by modifying '--config_path' and '--label_map' to detect different types of content: - -| dataset | config_path | label_map | -| ------------------------------------------------------------ | ------------------------------------------------------------ | --------------------------------------------------------- | -| [TableBank](https://doc-analysis.github.io/tablebank-page/index.html) word | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_word/config | {0:"Table"} | -| TableBank latex | lp://TableBank/ppyolov2_r50vd_dcn_365e_tableBank_latex/config | {0:"Table"} | -| [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) | lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config | {0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"} | - -* TableBank word and TableBank latex are trained on datasets of word documents and latex documents respectively; -* Download TableBank dataset contains both word and latex。 - - - -## 3. PostProcess - -Layout parser contains multiple categories, if you only want to get the detection box for a specific category (such as the "Text" category), you can use the following code: - -```python -# follow the above code -# filter areas for a specific text type -text_blocks = lp.Layout([b for b in layout if b.type=='Text']) -figure_blocks = lp.Layout([b for b in layout if b.type=='Figure']) - -# text areas may be detected within the image area, delete these areas -text_blocks = lp.Layout([b for b in text_blocks \ - if not any(b.is_in(b_fig) for b_fig in figure_blocks)]) - -# sort text areas and assign ID -h, w = image.shape[:2] - -left_interval = lp.Interval(0, w/2*1.05, axis='x').put_on_canvas(image) - -left_blocks = text_blocks.filter_by(left_interval, center=True) -left_blocks.sort(key = lambda b:b.coordinates[1]) - -right_blocks = [b for b in text_blocks if b not in left_blocks] -right_blocks.sort(key = lambda b:b.coordinates[1]) - -# the two lists are merged and the indexes are added in order -text_blocks = lp.Layout([b.set(id = idx) for idx, b in enumerate(left_blocks + right_blocks)]) - -# display result -show_img = lp.draw_box(image, text_blocks, - box_width=3, - show_element_id=True) -show_img.show() -``` - -Displays results with only the "Text" category: - -
- (figure: detection result with only the "Text" category)
- - -## 4. Results - -| Dataset | mAP | CPU time cost | GPU time cost | -| --------- | ---- | ------------- | ------------- | -| PubLayNet | 93.6 | 1713.7ms | 66.6ms | -| TableBank | 96.2 | 1968.4ms | 65.1ms | - -**Envrionment:** - -​ **CPU:** Intel(R) Xeon(R) CPU E5-2650 v4 @ 2.20GHz,24core - -​ **GPU:** a single NVIDIA Tesla P40 - - - -## 5. Training - -The above model is based on PaddleDetection](https://github.com/PaddlePaddle/PaddleDetection) ,if you want to train your own layout parser model,please refer to:[train_layoutparser_model](train_layoutparser_model_en.md) - diff --git a/ppstructure/layout/train_layoutparser_model.md b/ppstructure/layout/train_layoutparser_model.md index e1cd2e0773d5edef80f2aba69c491eebb9cfd03e..08f5ebbf1aa276e4a3ecf27af46442161afcda1f 100644 --- a/ppstructure/layout/train_layoutparser_model.md +++ b/ppstructure/layout/train_layoutparser_model.md @@ -1,32 +1,32 @@ -# 训练版面分析 +# Training layout-parse -[1. 安装](#安装) +[1. Installation](#Installation) -​ [1.1 环境要求](#环境要求) +​ [1.1 Requirements](#Requirements) -​ [1.2 安装PaddleDetection](#安装PaddleDetection) +​ [1.2 Install PaddleDetection](#Install PaddleDetection) -[2. 准备数据](#准备数据) +[2. Data preparation](#Data preparation) -[3. 配置文件改动和说明](#配置文件改动和说明) +[3. Configuration](#Configuration) -[4. PaddleDetection训练](#训练) +[4. Training](#Training) -[5. PaddleDetection预测](#预测) +[5. Prediction](#Prediction) -[6. 预测部署](#预测部署) +[6. Deployment](#Deployment) -​ [6.1 模型导出](#模型导出) +​ [6.1 Export model](#Export model) -​ [6.2 layout parser预测](#layout_parser预测) +​ [6.2 Inference](#Inference) - + -## 1. 安装 +## 1. Installation - + -### 1.1 环境要求 +### 1.1 Requirements - PaddlePaddle 2.1 - OS 64 bit @@ -35,56 +35,56 @@ - CUDA >= 10.1 - cuDNN >= 7.6 - + -### 1.2 安装PaddleDetection +### 1.2 Install PaddleDetection ```bash -# 克隆PaddleDetection仓库 +# Clone PaddleDetection repository cd git clone https://github.com/PaddlePaddle/PaddleDetection.git cd PaddleDetection -# 安装其他依赖 +# Install other dependencies pip install -r requirements.txt ``` -更多安装教程,请参考: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) +For more installation tutorials, please refer to: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) - + -## 2. 准备数据 +## 2. 
Data preparation -下载 [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) 数据集: +Download the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset ```bash cd PaddleDetection/dataset/ mkdir publaynet -# 执行命令,下载 +# execute the command,download PubLayNet wget -O publaynet.tar.gz https://dax-cdn.cdn.appdomain.cloud/dax-publaynet/1.0.0/publaynet.tar.gz?_ga=2.104193024.1076900768.1622560733-649911202.1622560733 -# 解压 +# unpack tar -xvf publaynet.tar.gz ``` -解压之后PubLayNet目录结构: +PubLayNet directory structure after decompressing : | File or Folder | Description | num | | :------------- | :----------------------------------------------- | ------- | | `train/` | Images in the training subset | 335,703 | | `val/` | Images in the validation subset | 11,245 | | `test/` | Images in the testing subset | 11,405 | -| `train.json` | Annotations for training images | 1 | -| `val.json` | Annotations for validation images | 1 | -| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 | -| `README.txt` | Text file with the file names and description | 1 | +| `train.json` | Annotations for training images | 1 | +| `val.json` | Annotations for validation images | 1 | +| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 | +| `README.txt` | Text file with the file names and description | 1 | -如果使用其它数据集,请参考[准备训练数据](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) +For other datasets,please refer to [the PrepareDataSet]((https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) ) - + -## 3. 配置文件改动和说明 +## 3. Configuration -我们使用 `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml`配置进行训练,配置文件摘要如下: +We use the `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml` configuration for training,the configuration file is as follows ```bash _BASE_: [ @@ -98,96 +98,96 @@ _BASE_: [ snapshot_epoch: 8 weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final ``` -从中可以看到 `ppyolov2_r50vd_dcn_365e_coco.yml` 配置需要依赖其他的配置文件,在该例子中需要依赖: +The `ppyolov2_r50vd_dcn_365e_coco.yml` configuration depends on other configuration files, in this case: -- coco_detection.yml:主要说明了训练数据和验证数据的路径 +- coco_detection.yml:mainly explains the path of training data and verification data -- runtime.yml:主要说明了公共的运行参数,比如是否使用GPU、每多少个epoch存储checkpoint等 +- runtime.yml:mainly describes the common parameters, such as whether to use the GPU and how many epoch to save model etc. -- optimizer_365e.yml:主要说明了学习率和优化器的配置 +- optimizer_365e.yml:mainly explains the learning rate and optimizer configuration -- ppyolov2_r50vd_dcn.yml:主要说明模型和主干网络的情况 +- ppyolov2_r50vd_dcn.yml:mainly describes the model and the network -- ppyolov2_reader.yml:主要说明数据读取器配置,如batch size,并发加载子进程数等,同时包含读取后预处理操作,如resize、数据增强等等 +- ppyolov2_reader.yml:mainly describes the configuration of data readers, such as batch size and number of concurrent loading child processes, and also includes post preprocessing, such as resize and data augmention etc. -根据实际情况,修改上述文件,比如数据集路径、batch size等。 +Modify the preceding files, such as the dataset path and batch size etc. - + -## 4. PaddleDetection训练 +## 4. 
Training -PaddleDetection提供了单卡/多卡训练模式,满足用户多种训练需求 +PaddleDetection provides single-card/multi-card training mode to meet various training needs of users: -* GPU 单卡训练 +* GPU single card training ```bash -export CUDA_VISIBLE_DEVICES=0 #windows和Mac下不需要执行该命令 +export CUDA_VISIBLE_DEVICES=0 #Don't need to run this command on Windows and Mac python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml ``` -* GPU多卡训练 +* GPU multi-card training ```bash export CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval ``` ---eval:表示边训练边验证 +--eval: training while verifying -* 模型恢复训练 +* Model recovery training -在日常训练过程中,有的用户由于一些原因导致训练中断,用户可以使用-r的命令恢复训练: +During the daily training, if training is interrupted due to some reasons, you can use the -r command to resume the training: ```bash export CUDA_VISIBLE_DEVICES=0,1,2,3 python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval -r output/ppyolov2_r50vd_dcn_365e_coco/10000 ``` -注意:如果遇到 "`Out of memory error`" 问题, 尝试在 `ppyolov2_reader.yml` 文件中调小`batch_size` +Note: If you encounter "`Out of memory error`" , try reducing `batch_size` in the `ppyolov2_reader.yml` file - +prediction -## 5. PaddleDetection预测 +## 5. Prediction -设置参数,使用PaddleDetection预测: +Set parameters and use PaddleDetection to predict: ```bash export CUDA_VISIBLE_DEVICES=0 python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer_img=images/paper-image.jpg --output_dir=infer_output/ --draw_threshold=0.5 -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final --use_vdl=Ture ``` -`--draw_threshold` 是个可选参数. 根据 [NMS](https://ieeexplore.ieee.org/document/1699659) 的计算,不同阈值会产生不同的结果 `keep_top_k`表示设置输出目标的最大数量,默认值为100,用户可以根据自己的实际情况进行设定。 +`--draw_threshold` is an optional parameter. According to the calculation of [NMS](https://ieeexplore.ieee.org/document/1699659), different threshold will produce different results, ` keep_top_k ` represent the maximum amount of output target, the default value is 10. You can set different value according to your own actual situation。 - + -## 6. 预测部署 +## 6. Deployment -在layout parser中使用自己训练好的模型。 +Use your trained model in Layout Parser - + -### 6.1 模型导出 +### 6.1 Export model -在模型训练过程中保存的模型文件是包含前向预测和反向传播的过程,在实际的工业部署则不需要反向传播,因此需要将模型进行导成部署需要的模型格式。 在PaddleDetection中提供了 `tools/export_model.py`脚本来导出模型。 +n the process of model training, the model file saved contains the process of forward prediction and back propagation. In the actual industrial deployment, there is no need for back propagation. Therefore, the model should be translated into the model format required by the deployment. The `tools/export_model.py` script is provided in PaddleDetection to export the model. -导出模型名称默认是`model.*`,layout parser代码模型名称是`inference.*`, 所以修改[PaddleDetection/ppdet/engine/trainer.py ](https://github.com/PaddlePaddle/PaddleDetection/blob/b87a1ea86fa18ce69e44a17ad1b49c1326f19ff9/ppdet/engine/trainer.py#L512) (点开链接查看详细代码行),将`model`改为`inference`即可。 +The exported model name defaults to `model.*`, Layout Parser's code model is `inference.*`, So change [PaddleDetection/ppdet/engine/trainer. Py ](https://github.com/PaddlePaddle/PaddleDetection/blob/b87a1ea86fa18ce69e44a17ad1b49c1326f19ff9/ppdet/engine/trainer.py# L512) (click on the link to see the detailed line of code), change 'model' to 'inference'. 
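+
+As a rough, hypothetical sketch of that one-word edit (the actual variable name around the linked line may differ between PaddleDetection revisions, so check the link above rather than copying this verbatim):
+
+```python
+# hypothetical sketch of the edit in PaddleDetection's ppdet/engine/trainer.py
+# save_name = 'model'     # before: export writes model.pdmodel / model.pdiparams / ...
+save_name = 'inference'   # after: export writes inference.pdmodel / inference.pdiparams / ...
+```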
-执行导出模型脚本: +Execute the script to export model: ```bash python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --output_dir=./inference -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams ``` -预测模型会导出到`inference/ppyolov2_r50vd_dcn_365e_coco`目录下,分别为`infer_cfg.yml`(预测不需要), `inference.pdiparams`, `inference.pdiparams.info`,`inference.pdmodel` 。 +The prediction model is exported to `inference/ppyolov2_r50vd_dcn_365e_coco` ,including:`infer_cfg.yml`(prediction not required), `inference.pdiparams`, `inference.pdiparams.info`,`inference.pdmodel` -更多模型导出教程,请参考:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) +More model export tutorials, please refer to:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) - + -### 6.2 layout_parser预测 +### 6.2 Inference -`model_path`指定训练好的模型路径,使用layout parser进行预测: +`model_path` represent the trained model path, and layoutparser is used to predict: ```bash import layoutparser as lp @@ -198,7 +198,6 @@ model = lp.PaddleDetectionLayoutModel(model_path="inference/ppyolov2_r50vd_dcn_3 *** -更多PaddleDetection训练教程,请参考:[PaddleDetection训练](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md) +More PaddleDetection training tutorials,please reference:[PaddleDetection Training](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md) *** - diff --git a/ppstructure/layout/train_layoutparser_model_ch.md b/ppstructure/layout/train_layoutparser_model_ch.md new file mode 100644 index 0000000000000000000000000000000000000000..2f73c63adcea3f82ae579222e658291224f46237 --- /dev/null +++ b/ppstructure/layout/train_layoutparser_model_ch.md @@ -0,0 +1,203 @@ +# 训练版面分析 + +[1. 安装](#安装) + +​ [1.1 环境要求](#环境要求) + +​ [1.2 安装PaddleDetection](#安装PaddleDetection) + +[2. 准备数据](#准备数据) + +[3. 配置文件改动和说明](#配置文件改动和说明) + +[4. PaddleDetection训练](#训练) + +[5. PaddleDetection预测](#预测) + +[6. 预测部署](#预测部署) + +​ [6.1 模型导出](#模型导出) + +​ [6.2 layout parser预测](#layout_parser预测) + + + +## 1. 安装 + + + +### 1.1 环境要求 + +- PaddlePaddle 2.1 +- OS 64 bit +- Python 3(3.5.1+/3.6/3.7/3.8/3.9),64 bit +- pip/pip3(9.0.1+), 64 bit +- CUDA >= 10.1 +- cuDNN >= 7.6 + + + +### 1.2 安装PaddleDetection + +```bash +# 克隆PaddleDetection仓库 +cd +git clone https://github.com/PaddlePaddle/PaddleDetection.git + +cd PaddleDetection +# 安装其他依赖 +pip install -r requirements.txt +``` + +更多安装教程,请参考: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) + + + +## 2. 
准备数据 + +下载 [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) 数据集: + +```bash +cd PaddleDetection/dataset/ +mkdir publaynet +# 执行命令,下载 +wget -O publaynet.tar.gz https://dax-cdn.cdn.appdomain.cloud/dax-publaynet/1.0.0/publaynet.tar.gz?_ga=2.104193024.1076900768.1622560733-649911202.1622560733 +# 解压 +tar -xvf publaynet.tar.gz +``` + +解压之后PubLayNet目录结构: + +| File or Folder | Description | num | +| :------------- | :----------------------------------------------- | ------- | +| `train/` | Images in the training subset | 335,703 | +| `val/` | Images in the validation subset | 11,245 | +| `test/` | Images in the testing subset | 11,405 | +| `train.json` | Annotations for training images | 1 | +| `val.json` | Annotations for validation images | 1 | +| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 | +| `README.txt` | Text file with the file names and description | 1 | + +如果使用其它数据集,请参考[准备训练数据](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) + + + +## 3. 配置文件改动和说明 + +我们使用 `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml`配置进行训练,配置文件摘要如下: + +```bash +_BASE_: [ + '../datasets/coco_detection.yml', + '../runtime.yml', + './_base_/ppyolov2_r50vd_dcn.yml', + './_base_/optimizer_365e.yml', + './_base_/ppyolov2_reader.yml', +] + +snapshot_epoch: 8 +weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final +``` +从中可以看到 `ppyolov2_r50vd_dcn_365e_coco.yml` 配置需要依赖其他的配置文件,在该例子中需要依赖: + +- coco_detection.yml:主要说明了训练数据和验证数据的路径 + +- runtime.yml:主要说明了公共的运行参数,比如是否使用GPU、每多少个epoch存储checkpoint等 + +- optimizer_365e.yml:主要说明了学习率和优化器的配置 + +- ppyolov2_r50vd_dcn.yml:主要说明模型和主干网络的情况 + +- ppyolov2_reader.yml:主要说明数据读取器配置,如batch size,并发加载子进程数等,同时包含读取后预处理操作,如resize、数据增强等等 + + +根据实际情况,修改上述文件,比如数据集路径、batch size等。 + + + +## 4. PaddleDetection训练 + +PaddleDetection提供了单卡/多卡训练模式,满足用户多种训练需求 + +* GPU 单卡训练 + +```bash +export CUDA_VISIBLE_DEVICES=0 #windows和Mac下不需要执行该命令 +python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml +``` + +* GPU多卡训练 + +```bash +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval +``` + +--eval:表示边训练边验证 + +* 模型恢复训练 + +在日常训练过程中,有的用户由于一些原因导致训练中断,用户可以使用-r的命令恢复训练: + +```bash +export CUDA_VISIBLE_DEVICES=0,1,2,3 +python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval -r output/ppyolov2_r50vd_dcn_365e_coco/10000 +``` + +注意:如果遇到 "`Out of memory error`" 问题, 尝试在 `ppyolov2_reader.yml` 文件中调小`batch_size` + + + +## 5. PaddleDetection预测 + +设置参数,使用PaddleDetection预测: + +```bash +export CUDA_VISIBLE_DEVICES=0 +python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer_img=images/paper-image.jpg --output_dir=infer_output/ --draw_threshold=0.5 -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final --use_vdl=Ture +``` + +`--draw_threshold` 是个可选参数. 根据 [NMS](https://ieeexplore.ieee.org/document/1699659) 的计算,不同阈值会产生不同的结果 `keep_top_k`表示设置输出目标的最大数量,默认值为100,用户可以根据自己的实际情况进行设定。 + + + +## 6. 
预测部署 + +在layout parser中使用自己训练好的模型。 + + + +### 6.1 模型导出 + +在模型训练过程中保存的模型文件是包含前向预测和反向传播的过程,在实际的工业部署则不需要反向传播,因此需要将模型进行导成部署需要的模型格式。 在PaddleDetection中提供了 `tools/export_model.py`脚本来导出模型。 + +导出模型名称默认是`model.*`,layout parser代码模型名称是`inference.*`, 所以修改[PaddleDetection/ppdet/engine/trainer.py ](https://github.com/PaddlePaddle/PaddleDetection/blob/b87a1ea86fa18ce69e44a17ad1b49c1326f19ff9/ppdet/engine/trainer.py#L512) (点开链接查看详细代码行),将`model`改为`inference`即可。 + +执行导出模型脚本: + +```bash +python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --output_dir=./inference -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams +``` + +预测模型会导出到`inference/ppyolov2_r50vd_dcn_365e_coco`目录下,分别为`infer_cfg.yml`(预测不需要), `inference.pdiparams`, `inference.pdiparams.info`,`inference.pdmodel` 。 + +更多模型导出教程,请参考:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) + + + +### 6.2 layout_parser预测 + +`model_path`指定训练好的模型路径,使用layout parser进行预测: + +```bash +import layoutparser as lp +model = lp.PaddleDetectionLayoutModel(model_path="inference/ppyolov2_r50vd_dcn_365e_coco", threshold=0.5,label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"},enforce_cpu=True,enable_mkldnn=True) +``` + + + +*** + +更多PaddleDetection训练教程,请参考:[PaddleDetection训练](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md) + +*** diff --git a/ppstructure/layout/train_layoutparser_model_en.md b/ppstructure/layout/train_layoutparser_model_en.md deleted file mode 100644 index ffe1026278704dc9f7994ee8cc514823c1515163..0000000000000000000000000000000000000000 --- a/ppstructure/layout/train_layoutparser_model_en.md +++ /dev/null @@ -1,204 +0,0 @@ -# Training layout-parse - -[1. Installation](#Installation) - -​ [1.1 Requirements](#Requirements) - -​ [1.2 Install PaddleDetection](#Install PaddleDetection) - -[2. Data preparation](#Data preparation) - -[3. Configuration](#Configuration) - -[4. Training](#Training) - -[5. Prediction](#Prediction) - -[6. Deployment](#Deployment) - -​ [6.1 Export model](#Export model) - -​ [6.2 Inference](#Inference) - - - -## 1. Installation - - - -### 1.1 Requirements - -- PaddlePaddle 2.1 -- OS 64 bit -- Python 3(3.5.1+/3.6/3.7/3.8/3.9),64 bit -- pip/pip3(9.0.1+), 64 bit -- CUDA >= 10.1 -- cuDNN >= 7.6 - - - -### 1.2 Install PaddleDetection - -```bash -# Clone PaddleDetection repository -cd -git clone https://github.com/PaddlePaddle/PaddleDetection.git - -cd PaddleDetection -# Install other dependencies -pip install -r requirements.txt -``` - -For more installation tutorials, please refer to: [Install doc](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/INSTALL_cn.md) - - - -## 2. 
Data preparation - -Download the [PubLayNet](https://github.com/ibm-aur-nlp/PubLayNet) dataset - -```bash -cd PaddleDetection/dataset/ -mkdir publaynet -# execute the command,download PubLayNet -wget -O publaynet.tar.gz https://dax-cdn.cdn.appdomain.cloud/dax-publaynet/1.0.0/publaynet.tar.gz?_ga=2.104193024.1076900768.1622560733-649911202.1622560733 -# unpack -tar -xvf publaynet.tar.gz -``` - -PubLayNet directory structure after decompressing : - -| File or Folder | Description | num | -| :------------- | :----------------------------------------------- | ------- | -| `train/` | Images in the training subset | 335,703 | -| `val/` | Images in the validation subset | 11,245 | -| `test/` | Images in the testing subset | 11,405 | -| `train.json` | Annotations for training images | 1 | -| `val.json` | Annotations for validation images | 1 | -| `LICENSE.txt` | Plaintext version of the CDLA-Permissive license | 1 | -| `README.txt` | Text file with the file names and description | 1 | - -For other datasets,please refer to [the PrepareDataSet]((https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/PrepareDataSet.md) ) - - - -## 3. Configuration - -We use the `configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml` configuration for training,the configuration file is as follows - -```bash -_BASE_: [ - '../datasets/coco_detection.yml', - '../runtime.yml', - './_base_/ppyolov2_r50vd_dcn.yml', - './_base_/optimizer_365e.yml', - './_base_/ppyolov2_reader.yml', -] - -snapshot_epoch: 8 -weights: output/ppyolov2_r50vd_dcn_365e_coco/model_final -``` -The `ppyolov2_r50vd_dcn_365e_coco.yml` configuration depends on other configuration files, in this case: - -- coco_detection.yml:mainly explains the path of training data and verification data - -- runtime.yml:mainly describes the common parameters, such as whether to use the GPU and how many epoch to save model etc. - -- optimizer_365e.yml:mainly explains the learning rate and optimizer configuration - -- ppyolov2_r50vd_dcn.yml:mainly describes the model and the network - -- ppyolov2_reader.yml:mainly describes the configuration of data readers, such as batch size and number of concurrent loading child processes, and also includes post preprocessing, such as resize and data augmention etc. - - -Modify the preceding files, such as the dataset path and batch size etc. - - - -## 4. Training - -PaddleDetection provides single-card/multi-card training mode to meet various training needs of users: - -* GPU single card training - -```bash -export CUDA_VISIBLE_DEVICES=0 #Don't need to run this command on Windows and Mac -python tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml -``` - -* GPU multi-card training - -```bash -export CUDA_VISIBLE_DEVICES=0,1,2,3 -python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval -``` - ---eval: training while verifying - -* Model recovery training - -During the daily training, if training is interrupted due to some reasons, you can use the -r command to resume the training: - -```bash -export CUDA_VISIBLE_DEVICES=0,1,2,3 -python -m paddle.distributed.launch --gpus 0,1,2,3 tools/train.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --eval -r output/ppyolov2_r50vd_dcn_365e_coco/10000 -``` - -Note: If you encounter "`Out of memory error`" , try reducing `batch_size` in the `ppyolov2_reader.yml` file - -prediction - -## 5. 
Prediction - -Set parameters and use PaddleDetection to predict: - -```bash -export CUDA_VISIBLE_DEVICES=0 -python tools/infer.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --infer_img=images/paper-image.jpg --output_dir=infer_output/ --draw_threshold=0.5 -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final --use_vdl=Ture -``` - -`--draw_threshold` is an optional parameter. According to the calculation of [NMS](https://ieeexplore.ieee.org/document/1699659), different threshold will produce different results, ` keep_top_k ` represent the maximum amount of output target, the default value is 10. You can set different value according to your own actual situation。 - - - -## 6. Deployment - -Use your trained model in Layout Parser - - - -### 6.1 Export model - -n the process of model training, the model file saved contains the process of forward prediction and back propagation. In the actual industrial deployment, there is no need for back propagation. Therefore, the model should be translated into the model format required by the deployment. The `tools/export_model.py` script is provided in PaddleDetection to export the model. - -The exported model name defaults to `model.*`, Layout Parser's code model is `inference.*`, So change [PaddleDetection/ppdet/engine/trainer. Py ](https://github.com/PaddlePaddle/PaddleDetection/blob/b87a1ea86fa18ce69e44a17ad1b49c1326f19ff9/ppdet/engine/trainer.py# L512) (click on the link to see the detailed line of code), change 'model' to 'inference'. - -Execute the script to export model: - -```bash -python tools/export_model.py -c configs/ppyolo/ppyolov2_r50vd_dcn_365e_coco.yml --output_dir=./inference -o weights=output/ppyolov2_r50vd_dcn_365e_coco/model_final.pdparams -``` - -The prediction model is exported to `inference/ppyolov2_r50vd_dcn_365e_coco` ,including:`infer_cfg.yml`(prediction not required), `inference.pdiparams`, `inference.pdiparams.info`,`inference.pdmodel` - -More model export tutorials, please refer to:[EXPORT_MODEL](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/deploy/EXPORT_MODEL.md) - - - -### 6.2 Inference - -`model_path` represent the trained model path, and layoutparser is used to predict: - -```bash -import layoutparser as lp -model = lp.PaddleDetectionLayoutModel(model_path="inference/ppyolov2_r50vd_dcn_365e_coco", threshold=0.5,label_map={0: "Text", 1: "Title", 2: "List", 3:"Table", 4:"Figure"},enforce_cpu=True,enable_mkldnn=True) -``` - - - -*** - -More PaddleDetection training tutorials,please reference:[PaddleDetection Training](https://github.com/PaddlePaddle/PaddleDetection/blob/release/2.1/docs/tutorials/GETTING_STARTED_cn.md) - -*** - diff --git a/ppstructure/paddlestructure.py b/ppstructure/paddlestructure.py deleted file mode 100644 index d0009ae8a9be0a133e0d56734b739265034dc314..0000000000000000000000000000000000000000 --- a/ppstructure/paddlestructure.py +++ /dev/null @@ -1,147 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import logging -import os -import sys - -__dir__ = os.path.dirname(__file__) -sys.path.append(__dir__) -sys.path.append(os.path.join(__dir__, '..')) - -import cv2 -import numpy as np -from pathlib import Path - -from ppocr.utils.logging import get_logger -from ppstructure.predict_system import OCRSystem, save_res -from ppstructure.utility import init_args, draw_result - -logger = get_logger() -from ppocr.utils.utility import check_and_read_gif, get_image_file_list -from ppocr.utils.network import maybe_download, download_with_progressbar, confirm_model_dir_url, is_link - -__all__ = ['PaddleStructure', 'draw_result', 'save_res'] - -VERSION = '2.1' -BASE_DIR = os.path.expanduser("~/.paddlestructure/") - -model_urls = { - 'det': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar', - 'rec': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar', - 'table': 'https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar' - -} - - -def parse_args(mMain=True): - import argparse - parser = init_args() - parser.add_help = mMain - - for action in parser._actions: - if action.dest in ['rec_char_dict_path', 'table_char_dict_path']: - action.default = None - if mMain: - return parser.parse_args() - else: - inference_args_dict = {} - for action in parser._actions: - inference_args_dict[action.dest] = action.default - return argparse.Namespace(**inference_args_dict) - - -class PaddleStructure(OCRSystem): - def __init__(self, **kwargs): - params = parse_args(mMain=False) - params.__dict__.update(**kwargs) - if not params.show_log: - logger.setLevel(logging.INFO) - params.use_angle_cls = False - # init model dir - params.det_model_dir, det_url = confirm_model_dir_url(params.det_model_dir, - os.path.join(BASE_DIR, VERSION, 'det'), - model_urls['det']) - params.rec_model_dir, rec_url = confirm_model_dir_url(params.rec_model_dir, - os.path.join(BASE_DIR, VERSION, 'rec'), - model_urls['rec']) - params.table_model_dir, table_url = confirm_model_dir_url(params.table_model_dir, - os.path.join(BASE_DIR, VERSION, 'table'), - model_urls['table']) - # download model - maybe_download(params.det_model_dir, det_url) - maybe_download(params.rec_model_dir, rec_url) - maybe_download(params.table_model_dir, table_url) - - if params.rec_char_dict_path is None: - params.rec_char_type = 'EN' - if os.path.exists(str(Path(__file__).parent / 'ppocr/utils/dict/table_dict.txt')): - params.rec_char_dict_path = str(Path(__file__).parent / 'ppocr/utils/dict/table_dict.txt') - else: - params.rec_char_dict_path = str(Path(__file__).parent.parent / 'ppocr/utils/dict/table_dict.txt') - if params.table_char_dict_path is None: - if os.path.exists(str(Path(__file__).parent / 'ppocr/utils/dict/table_structure_dict.txt')): - params.table_char_dict_path = str( - Path(__file__).parent / 'ppocr/utils/dict/table_structure_dict.txt') - else: - params.table_char_dict_path = str( - Path(__file__).parent.parent / 'ppocr/utils/dict/table_structure_dict.txt') - - print(params) - super().__init__(params) - - def __call__(self, img): - if isinstance(img, str): - # download net image - if img.startswith('http'): - download_with_progressbar(img, 'tmp.jpg') - img = 'tmp.jpg' - image_file = img - img, flag = check_and_read_gif(image_file) - if not flag: - with open(image_file, 'rb') as f: - np_arr = np.frombuffer(f.read(), dtype=np.uint8) - img = cv2.imdecode(np_arr, cv2.IMREAD_COLOR) - if img is None: - logger.error("error in loading 
image:{}".format(image_file)) - return None - if isinstance(img, np.ndarray) and len(img.shape) == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) - - res = super().__call__(img) - return res - - -def main(): - # for cmd - args = parse_args(mMain=True) - image_dir = args.image_dir - save_folder = args.output - if image_dir.startswith('http'): - download_with_progressbar(image_dir, 'tmp.jpg') - image_file_list = ['tmp.jpg'] - else: - image_file_list = get_image_file_list(args.image_dir) - if len(image_file_list) == 0: - logger.error('no images find in {}'.format(args.image_dir)) - return - - structure_engine = PaddleStructure(**(args.__dict__)) - for img_path in image_file_list: - img_name = os.path.basename(img_path).split('.')[0] - logger.info('{}{}{}'.format('*' * 10, img_path, '*' * 10)) - result = structure_engine(img_path) - for item in result: - logger.info(item['res']) - save_res(result, save_folder, img_name) - logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) \ No newline at end of file diff --git a/ppstructure/predict_system.py b/ppstructure/predict_system.py index 60e2574515aeabaedc4f23d1589677c03543ce40..b2de3d4de80b39f046cf6cbc8a9ebbc52bf69334 100644 --- a/ppstructure/predict_system.py +++ b/ppstructure/predict_system.py @@ -26,26 +26,33 @@ import numpy as np import time import logging -import layoutparser as lp - from ppocr.utils.utility import get_image_file_list, check_and_read_gif from ppocr.utils.logging import get_logger from tools.infer.predict_system import TextSystem from ppstructure.table.predict_table import TableSystem, to_excel -from ppstructure.utility import parse_args, draw_result +from ppstructure.utility import parse_args, draw_structure_result logger = get_logger() class OCRSystem(object): def __init__(self, args): - args.det_limit_type = 'resize_long' + import layoutparser as lp + # args.det_limit_type = 'resize_long' args.drop_score = 0 if not args.show_log: logger.setLevel(logging.INFO) self.text_system = TextSystem(args) self.table_system = TableSystem(args, self.text_system.text_detector, self.text_system.text_recognizer) - self.table_layout = lp.PaddleDetectionLayoutModel("lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config", + + config_path = None + model_path = None + if os.path.isdir(args.layout_path_model): + model_path = args.layout_path_model + else: + config_path = args.layout_path_model + self.table_layout = lp.PaddleDetectionLayoutModel(config_path=config_path, + model_path=model_path, threshold=0.5, enable_mkldnn=args.enable_mkldnn, enforce_cpu=not args.use_gpu, thread_num=args.cpu_threads) self.use_angle_cls = args.use_angle_cls @@ -66,21 +73,21 @@ class OCRSystem(object): filter_boxes = [x + [x1, y1] for x in filter_boxes] filter_boxes = [x.reshape(-1).tolist() for x in filter_boxes] # remove style char - style_token = ['','','','','','','','', - '','','','','',''] + style_token = ['', '', '', '', '', '', '', '', + '', '', '', '', '', ''] filter_rec_res_tmp = [] for rec_res in filter_rec_res: rec_str, rec_conf = rec_res for token in style_token: if token in rec_str: rec_str = rec_str.replace(token, '') - filter_rec_res_tmp.append((rec_str,rec_conf)) + filter_rec_res_tmp.append((rec_str, rec_conf)) res = (filter_boxes, filter_rec_res_tmp) - res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'res': res}) + res_list.append({'type': region.type, 'bbox': [x1, y1, x2, y2], 'img': roi_img, 'res': res}) return res_list -def save_res(res, save_folder, img_name): +def save_structure_res(res, save_folder, 
img_name): excel_save_folder = os.path.join(save_folder, img_name) os.makedirs(excel_save_folder, exist_ok=True) # save res @@ -89,6 +96,10 @@ def save_res(res, save_folder, img_name): if region['type'] == 'Table': excel_path = os.path.join(excel_save_folder, '{}.xlsx'.format(region['bbox'])) to_excel(region['res'], excel_path) + if region['type'] == 'Figure': + roi_img = region['img'] + img_path = os.path.join(excel_save_folder, '{}.jpg'.format(region['bbox'])) + cv2.imwrite(img_path, roi_img) else: for box, rec_res in zip(region['res'][0], region['res'][1]): f.write('{}\t{}\n'.format(np.array(box).reshape(-1).tolist(), rec_res)) @@ -115,8 +126,8 @@ def main(args): continue starttime = time.time() res = structure_sys(img) - save_res(res, save_folder, img_name) - draw_img = draw_result(img, res, args.vis_font_path) + save_structure_res(res, save_folder, img_name) + draw_img = draw_structure_result(img, res, args.vis_font_path) cv2.imwrite(os.path.join(save_folder, img_name, 'show.jpg'), draw_img) logger.info('result save to {}'.format(os.path.join(save_folder, img_name))) elapse = time.time() - starttime diff --git a/ppstructure/setup.py b/ppstructure/setup.py deleted file mode 100644 index c99d71fe37419b50badd3cf910fec6bb5d2cc67f..0000000000000000000000000000000000000000 --- a/ppstructure/setup.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import os - -from setuptools import setup -from io import open -import shutil - -with open('../requirements.txt', encoding="utf-8-sig") as f: - requirements = f.readlines() - requirements.append('tqdm') - - -def readme(): - with open('README_ch.md', encoding="utf-8-sig") as f: - README = f.read() - return README - - -shutil.copytree('./table', './ppstructure/table') -shutil.copyfile('./predict_system.py', './ppstructure/predict_system.py') -shutil.copyfile('./utility.py', './ppstructure/utility.py') -shutil.copytree('../ppocr', './ppocr') -shutil.copytree('../tools', './tools') -shutil.copyfile('../LICENSE', './LICENSE') - -setup( - name='paddlestructure', - packages=['paddlestructure'], - package_dir={'paddlestructure': ''}, - include_package_data=True, - entry_points={"console_scripts": ["paddlestructure= paddlestructure.paddlestructure:main"]}, - version='1.0', - install_requires=requirements, - license='Apache License 2.0', - description='Awesome OCR toolkits based on PaddlePaddle (8.6M ultra-lightweight pre-trained model, support training and deployment among server, mobile, embeded and IoT devices', - long_description=readme(), - long_description_content_type='text/markdown', - url='https://github.com/PaddlePaddle/PaddleOCR', - download_url='https://github.com/PaddlePaddle/PaddleOCR.git', - keywords=[ - 'ocr textdetection textrecognition paddleocr crnn east star-net rosetta ocrlite db chineseocr chinesetextdetection chinesetextrecognition' - ], - classifiers=[ - 'Intended Audience :: Developers', 'Operating System :: OS Independent', - 'Natural Language :: Chinese (Simplified)', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.2', - 'Programming Language :: Python :: 3.3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', 'Topic :: Utilities' - ], ) - -shutil.rmtree('ppocr') -shutil.rmtree('tools') -shutil.rmtree('ppstructure') -os.remove('LICENSE') diff --git a/ppstructure/table/README.md b/ppstructure/table/README.md index c538db275844e8eb21f405728fe09ed10c070760..a8d10b79e507ab59ef2481982a33902e4a95e73e 100644 --- a/ppstructure/table/README.md +++ b/ppstructure/table/README.md @@ -1,12 +1,12 @@ -# Table structure and content prediction +# Table Recognition ## 1. pipeline -The ocr of the table mainly contains three models +The table recognition mainly contains three models 1. Single line text detection-DB 2. Single line text recognition-CRNN 3. Table structure and cell coordinate prediction-RARE -The table ocr flow chart is as follows +The table recognition flow chart is as follows ![tableocr_pipeline](../../doc/table/tableocr_pipeline_en.jpg) @@ -15,10 +15,39 @@ The table ocr flow chart is as follows 3. The recognition result of the cell is combined by the coordinates, recognition result of the single line and the coordinates of the cell. 4. The cell recognition result and the table structure together construct the html string of the table. -## 2. How to use +## 2. Performance +We evaluated the algorithm on the PubTabNet[1] eval dataset, and the performance is as follows: -### 2.1 Train +|Method|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)| +| --- | --- | +| EDD[2] | 88.3 | +| Ours | 93.32 | + +## 3. 
How to use
+
+### 3.1 Quick start
+
+```bash
+cd PaddleOCR/ppstructure
+
+# download model
+mkdir inference && cd inference
+# Download the detection model of the ultra-lightweight English table OCR model and unzip it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar
+# Download the recognition model of the ultra-lightweight English table OCR model and unzip it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar
+# Download the structure model of the ultra-lightweight English table OCR model and unzip it
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar
+cd ..
+# run
+python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output ../output/table
+```
+Note: The above model is trained on the PubLayNet dataset and only supports English scanning scenarios. To recognize other scenarios, train a model yourself and replace the three fields `det_model_dir`, `rec_model_dir` and `table_model_dir`.
+
+After running, the Excel sheet for each picture is saved in the directory specified by the `output` field.
+
+### 3.2 Train

 In this chapter, we only introduce the training of the table structure model, For model training of [text detection](../../doc/doc_en/detection_en.md) and [text recognition](../../doc/doc_en/recognition_en.md), please refer to the corresponding documents

@@ -48,9 +77,9 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo

**Note**: The priority of `Global.checkpoints` is higher than that of `Global.pretrain_weights`, that is, when two parameters are specified at the same time, the model specified by `Global.checkpoints` will be loaded first. If the model path specified by `Global.checkpoints` is wrong, the one specified by `Global.pretrain_weights` will be loaded.

-### 2.2 Eval
+### 3.3 Eval

-The table uses TEDS (Tree-Edit-Distance-based Similarity) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows:
+The table uses [TEDS (Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) as the evaluation metric of the model. Before the model evaluation, the three models in the pipeline need to be exported as inference models (we have provided them), and the gt for evaluation needs to be prepared. Examples of gt are as follows:
```json
{"PMC4289340_004_00.png": [
["", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "
", "", "", "
", "", "", "
", "", ""], @@ -69,11 +98,19 @@ cd PaddleOCR/ppstructure python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json ``` +If the PubLatNet eval dataset is used, it will be output +```bash +teds: 93.32 +``` -### 2.3 Inference +### 3.4 Inference ```python cd PaddleOCR/ppstructure python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table ``` -After running, the excel sheet of each picture will be saved in the directory specified by the output field \ No newline at end of file +After running, the excel sheet of each picture will be saved in the directory specified by the output field + +Reference +1. https://github.com/ibm-aur-nlp/PubTabNet +2. https://arxiv.org/pdf/1911.10683 \ No newline at end of file diff --git a/ppstructure/table/README_ch.md b/ppstructure/table/README_ch.md index 5981dab4b85d751ad26a9ba08ca4c9056d253961..2ded403c371984a447f94268d23ca1c6240cf432 100644 --- a/ppstructure/table/README_ch.md +++ b/ppstructure/table/README_ch.md @@ -1,7 +1,7 @@ -# Table OCR +# 表格识别 -## 1. Table OCR pineline -表格的ocr主要包含三个模型 +## 1. 表格识别 pipeline +表格识别主要包含三个模型 1. 单行文本检测-DB 2. 单行文本识别-CRNN 3. 表格结构和cell坐标预测-RARE @@ -17,9 +17,39 @@ 3. 由单行文字的坐标、识别结果和单元格的坐标一起组合出单元格的识别结果。 4. 单元格的识别结果和表格结构一起构造表格的html字符串。 -## 2. 使用 +## 2. 性能 +我们在 PubTabNet[1] 评估数据集上对算法进行了评估,性能如下 -### 2.1 训练 + +|算法|[TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src)| +| --- | --- | +| EDD[2] | 88.3 | +| Ours | 93.32 | + +## 3. 使用 + +### 3.1 快速开始 + +```python +cd PaddleOCR/ppstructure + +# 下载模型 +mkdir inference && cd inference +# 下载超轻量级表格英文OCR模型的检测模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_det_infer.tar && tar xf en_ppocr_mobile_v2.0_table_det_infer.tar +# 下载超轻量级表格英文OCR模型的识别模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_rec_infer.tar && tar xf en_ppocr_mobile_v2.0_table_rec_infer.tar +# 下载超轻量级英文表格英寸模型并解压 +wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/table/en_ppocr_mobile_v2.0_table_structure_infer.tar && tar xf en_ppocr_mobile_v2.0_table_structure_infer.tar +cd .. 
+# 执行预测 +python3 table/predict_table.py --det_model_dir=inference/en_ppocr_mobile_v2.0_table_det_infer --rec_model_dir=inference/en_ppocr_mobile_v2.0_table_rec_infer --table_model_dir=inference/en_ppocr_mobile_v2.0_table_structure_infer --image_dir=../doc/table/table.jpg --rec_char_dict_path=../ppocr/utils/ppocr_keys_v1.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=ch --det_limit_side_len=736 --det_limit_type=min --output ../output/table +``` +运行完成后,每张图片的excel表格会保存到output字段指定的目录下 + +note: 上述模型是在 PubLayNet 数据集上训练的表格识别模型,仅支持英文扫描场景,如需识别其他场景需要自己训练模型后替换 `det_model_dir`,`rec_model_dir`,`table_model_dir`三个字段即可。 + +### 3.2 训练 在这一章节中,我们仅介绍表格结构模型的训练,[文字检测](../../doc/doc_ch/detection.md)和[文字识别](../../doc/doc_ch/recognition.md)的模型训练请参考对应的文档。 #### 数据准备 @@ -46,9 +76,9 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo **注意**:`Global.checkpoints`的优先级高于`Global.pretrain_weights`的优先级,即同时指定两个参数时,优先加载`Global.checkpoints`指定的模型,如果`Global.checkpoints`指定的模型路径有误,会加载`Global.pretrain_weights`指定的模型。 -### 2.2 评估 +### 3.3 评估 -表格使用 TEDS(Tree-Edit-Distance-based Similarity) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下: +表格使用 [TEDS(Tree-Edit-Distance-based Similarity)](https://github.com/ibm-aur-nlp/PubTabNet/tree/master/src) 作为模型的评估指标。在进行模型评估之前,需要将pipeline中的三个模型分别导出为inference模型(我们已经提供好),还需要准备评估的gt, gt示例如下: ```json {"PMC4289340_004_00.png": [ ["", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "
", "", "", "
", "", "", "
", "", ""], @@ -56,7 +86,7 @@ python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./yo [["", "F", "e", "a", "t", "u", "r", "e", ""], ["", "G", "b", "3", " ", "+", ""], ["", "G", "b", "3", " ", "-", ""], ["", "P", "a", "t", "i", "e", "n", "t", "s", ""], ["6", "2"], ["4", "5"]] ]} ``` -json 中,key为图片名,value为对应的gt,gt是一个由四个item组成的list,每个item分别为 +json 中,key为图片名,value为对应的gt,gt是一个由三个item组成的list,每个item分别为 1. 表格结构的html字符串list 2. 每个cell的坐标 (不包括cell里文字为空的) 3. 每个cell里的文字信息 (不包括cell里文字为空的) @@ -66,11 +96,18 @@ json 中,key为图片名,value为对应的gt,gt是一个由四个item组 cd PaddleOCR/ppstructure python3 table/eval_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --gt_path=path/to/gt.json ``` +如使用PubLatNet评估数据集,将会输出 +```bash +teds: 93.32 +``` +### 3.4 预测 -### 2.3 预测 ```python cd PaddleOCR/ppstructure python3 table/predict_table.py --det_model_dir=path/to/det_model_dir --rec_model_dir=path/to/rec_model_dir --table_model_dir=path/to/table_model_dir --image_dir=../doc/table/1.png --rec_char_dict_path=../ppocr/utils/dict/table_dict.txt --table_char_dict_path=../ppocr/utils/dict/table_structure_dict.txt --rec_char_type=EN --det_limit_side_len=736 --det_limit_type=min --output ../output/table ``` -运行完成后,每张图片的excel表格会保存到output字段指定的目录下 + +Reference +1. https://github.com/ibm-aur-nlp/PubTabNet +2. https://arxiv.org/pdf/1911.10683 \ No newline at end of file diff --git a/ppstructure/utility.py b/ppstructure/utility.py index 29daeef4347be5b8db0f9fdeda0dd4d8864ef595..7d9fa76d0ada58e363243c114519d001de3fbf2a 100644 --- a/ppstructure/utility.py +++ b/ppstructure/utility.py @@ -27,7 +27,7 @@ def init_args(): parser.add_argument("--table_model_dir", type=str) parser.add_argument("--table_char_type", type=str, default='en') parser.add_argument("--table_char_dict_path", type=str, default="../ppocr/utils/dict/table_structure_dict.txt") - + parser.add_argument("--layout_path_model", type=str, default="lp://PubLayNet/ppyolov2_r50vd_dcn_365e_publaynet/config") return parser @@ -36,7 +36,7 @@ def parse_args(): return parser.parse_args() -def draw_result(image, result, font_path): +def draw_structure_result(image, result, font_path): if isinstance(image, np.ndarray): image = Image.fromarray(image) boxes, txts, scores = [], [], [] diff --git a/setup.py b/setup.py index a1ddbbb6d6d0c2657bb699a72bde75ef07ab3a94..7d4d871d89defcf832910c60f18b094f10ba11db 100644 --- a/setup.py +++ b/setup.py @@ -14,6 +14,7 @@ from setuptools import setup from io import open +from paddleocr import VERSION with open('requirements.txt', encoding="utf-8-sig") as f: requirements = f.readlines() @@ -32,7 +33,7 @@ setup( package_dir={'paddleocr': ''}, include_package_data=True, entry_points={"console_scripts": ["paddleocr= paddleocr.paddleocr:main"]}, - version='2.0.6', + version=VERSION, install_requires=requirements, license='Apache License 2.0', description='Awesome OCR toolkits based on PaddlePaddle (8.6M ultra-lightweight pre-trained model, support training and deployment among server, mobile, embeded and IoT devices', diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt deleted file mode 100644 index bdfd4d4f47431bca97437963e1dc56d1b57838bb..0000000000000000000000000000000000000000 --- a/test/ocr_det_params.txt +++ /dev/null @@ -1,35 +0,0 @@ 
-model_name:ocr_det -python:python3.7 -gpu_list:0|0,1 -Global.auto_cast:null -Global.epoch_num:10 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card: -Global.use_gpu: -Global.pretrained_model:null - -trainer:norm|pact -norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained -quant_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy -fpgm_train:null -distill_train:null - -eval:tools/eval.py -c configs/det/det_mv3_db.yml -o - -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o -quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o -fpgm_export:deploy/slim/prune/export_prune_model.py -distill_export:null - -inference:tools/infer/predict_det.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:True|False ---precision:fp32|fp16|int8 ---det_model_dir:./inference/ch_ppocr_mobile_v2.0_det_infer/ ---image_dir:./inference/ch_det_data_50/all-sum-510/ ---save_log_path:./test/output/ diff --git a/test/ocr_rec_params.txt b/test/ocr_rec_params.txt deleted file mode 100644 index 6ce081ec0523e86ee22c192cde5e631ebe1f63b0..0000000000000000000000000000000000000000 --- a/test/ocr_rec_params.txt +++ /dev/null @@ -1,35 +0,0 @@ -model_name:ocr_rec -python:python -gpu_list:0|0,1 -Global.auto_cast:null -Global.epoch_num:10 -Global.save_model_dir:./output/ -Train.loader.batch_size_per_card: -Global.use_gpu: -Global.pretrained_model:null - -trainer:norm|pact -norm_train:tools/train.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -quant_train:deploy/slim/quantization/quant.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -fpgm_train:null -distill_train:null - -eval:tools/eval.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o - -Global.save_inference_dir:./output/ -Global.pretrained_model: -norm_export:tools/export_model.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o -quant_export:deploy/slim/quantization/export_model.py -c configs/rec/rec_mv3_none_bilstm_ctc.yml -o -fpgm_export:null -distill_export:null - -inference:tools/infer/predict_rec.py ---use_gpu:True|False ---enable_mkldnn:True|False ---cpu_threads:1|6 ---rec_batch_num:1 ---use_tensorrt:True|False ---precision:fp32|fp16|int8 ---rec_model_dir:./inference/ch_ppocr_mobile_v2.0_rec_infer/ ---image_dir:./inference/rec_inference ---save_log_path:./test/output/ \ No newline at end of file diff --git a/test/prepare.sh b/test/prepare.sh deleted file mode 100644 index f6941b9ced8eb3b2fc6dda2a7ac76d025f7a18e1..0000000000000000000000000000000000000000 --- a/test/prepare.sh +++ /dev/null @@ -1,146 +0,0 @@ -#!/bin/bash -FILENAME=$1 -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] -MODE=$2 - -dataline=$(cat ${FILENAME}) - -# parser params -IFS=$'\n' -lines=(${dataline}) -function func_parser_key(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[0]} - echo ${tmp} -} -function func_parser_value(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[1]} - echo ${tmp} -} -IFS=$'\n' -# The training params -model_name=$(func_parser_value "${lines[0]}") -train_model_list=$(func_parser_value "${lines[0]}") - -trainer_list=$(func_parser_value "${lines[10]}") - -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] -MODE=$2 -# prepare pretrained weights and dataset -if [ 
${train_model_list[*]} = "ocr_det" ]; then - wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams - wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar - cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ - fi -if [ ${MODE} = "lite_train_infer" ];then - # pretrain lite train data - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar # todo change to bcebos - - cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar - ln -s ./icdar2015_lite ./icdar2015 - cd ../ - epoch=10 - eval_batch_step=10 -elif [ ${MODE} = "whole_train_infer" ];then - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar - cd ./train_data/ && tar xf icdar2015.tar && tar xf ic15_data.tar && cd ../ - epoch=500 - eval_batch_step=200 -elif [ ${MODE} = "whole_infer" ];then - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar - cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar - ln -s ./icdar2015_infer ./icdar2015 - cd ../ - epoch=10 - eval_batch_step=10 -else - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar - if [ ${model_name} = "ocr_det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_infer" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi -fi - - -IFS='|' -for train_model in ${train_model_list[*]}; do - if [ ${train_model} = "ocr_det" ];then - model_name="ocr_det" - yml_file="configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar - cd ./inference && tar xf ch_det_data_50.tar && cd ../ - img_dir="./inference/ch_det_data_50/all-sum-510" - data_dir=./inference/ch_det_data_50/ - data_label_file=[./inference/ch_det_data_50/test_gt_50.txt] - elif [ ${train_model} = "ocr_rec" ];then - model_name="ocr_rec" - yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/rec_inference.tar - cd ./inference && tar xf rec_inference.tar && cd ../ - img_dir="./inference/rec_inference/" - data_dir=./inference/rec_inference - data_label_file=[./inference/rec_inference/rec_gt_test.txt] - fi - - # eval - for slim_trainer in ${trainer_list[*]}; do - if [ ${slim_trainer} = "norm" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_train" - 
wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "pact" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "distill" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "fpgm" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - fi - done -done diff --git a/test/test.sh b/test/test.sh deleted file mode 100644 index f2ac3f8b29af1be08e8eb5b836133dc53ad3d5b2..0000000000000000000000000000000000000000 --- a/test/test.sh +++ /dev/null @@ -1,237 +0,0 @@ -#!/bin/bash -FILENAME=$1 -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] -MODE=$2 - -dataline=$(cat ${FILENAME}) - -# parser params -IFS=$'\n' -lines=(${dataline}) -function func_parser_key(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[0]} - echo ${tmp} -} -function func_parser_value(){ - strs=$1 - IFS=":" - array=(${strs}) - tmp=${array[1]} - echo ${tmp} -} -function status_check(){ - last_status=$1 # the exit code - run_command=$2 - run_log=$3 - if [ $last_status -eq 0 ]; then - echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} - else - echo -e "\033[33m Run failed with command - ${run_command}! 
\033[0m" | tee -a ${run_log} - fi -} - -IFS=$'\n' -# The training params -model_name=$(func_parser_value "${lines[0]}") -python=$(func_parser_value "${lines[1]}") -gpu_list=$(func_parser_value "${lines[2]}") -autocast_list=$(func_parser_value "${lines[3]}") -autocast_key=$(func_parser_key "${lines[3]}") -epoch_key=$(func_parser_key "${lines[4]}") -epoch_num=$(func_parser_value "${lines[4]}") -save_model_key=$(func_parser_key "${lines[5]}") -train_batch_key=$(func_parser_key "${lines[6]}") -train_use_gpu_key=$(func_parser_key "${lines[7]}") -pretrain_model_key=$(func_parser_key "${lines[8]}") -pretrain_model_value=$(func_parser_value "${lines[8]}") - -trainer_list=$(func_parser_value "${lines[9]}") -norm_trainer=$(func_parser_value "${lines[10]}") -pact_trainer=$(func_parser_value "${lines[11]}") -fpgm_trainer=$(func_parser_value "${lines[12]}") -distill_trainer=$(func_parser_value "${lines[13]}") - -eval_py=$(func_parser_value "${lines[14]}") - -save_infer_key=$(func_parser_key "${lines[15]}") -export_weight=$(func_parser_key "${lines[16]}") -norm_export=$(func_parser_value "${lines[17]}") -pact_export=$(func_parser_value "${lines[18]}") -fpgm_export=$(func_parser_value "${lines[19]}") -distill_export=$(func_parser_value "${lines[20]}") - -inference_py=$(func_parser_value "${lines[21]}") -use_gpu_key=$(func_parser_key "${lines[22]}") -use_gpu_list=$(func_parser_value "${lines[22]}") -use_mkldnn_key=$(func_parser_key "${lines[23]}") -use_mkldnn_list=$(func_parser_value "${lines[23]}") -cpu_threads_key=$(func_parser_key "${lines[24]}") -cpu_threads_list=$(func_parser_value "${lines[24]}") -batch_size_key=$(func_parser_key "${lines[25]}") -batch_size_list=$(func_parser_value "${lines[25]}") -use_trt_key=$(func_parser_key "${lines[26]}") -use_trt_list=$(func_parser_value "${lines[26]}") -precision_key=$(func_parser_key "${lines[27]}") -precision_list=$(func_parser_value "${lines[27]}") -infer_model_key=$(func_parser_key "${lines[28]}") -infer_model=$(func_parser_value "${lines[28]}") -image_dir_key=$(func_parser_key "${lines[29]}") -infer_img_dir=$(func_parser_value "${lines[29]}") -save_log_key=$(func_parser_key "${lines[30]}") - -LOG_PATH="./test/output" -mkdir -p ${LOG_PATH} -status_log="${LOG_PATH}/results.log" - - -function func_inference(){ - IFS='|' - _python=$1 - _script=$2 - _model_dir=$3 - _log_path=$4 - _img_dir=$5 - - # inference - for use_gpu in ${use_gpu_list[*]}; do - if [ ${use_gpu} = "False" ]; then - for use_mkldnn in ${use_mkldnn_list[*]}; do - for threads in ${cpu_threads_list[*]}; do - for batch_size in ${batch_size_list[*]}; do - _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" - eval $command - status_check $? 
"${command}" "${status_log}" - done - done - done - else - for use_trt in ${use_trt_list[*]}; do - for precision in ${precision_list[*]}; do - if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then - continue - fi - for batch_size in ${batch_size_list[*]}; do - _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" - eval $command - status_check $? "${command}" "${status_log}" - done - done - done - fi - done -} - -if [ ${MODE} != "infer" ]; then - -IFS="|" -for gpu in ${gpu_list[*]}; do - use_gpu=True - if [ ${gpu} = "-1" ];then - use_gpu=False - env="" - elif [ ${#gpu} -le 1 ];then - env="export CUDA_VISIBLE_DEVICES=${gpu}" - eval ${env} - elif [ ${#gpu} -le 15 ];then - IFS="," - array=(${gpu}) - env="export CUDA_VISIBLE_DEVICES=${array[0]}" - IFS="|" - else - IFS=";" - array=(${gpu}) - ips=${array[0]} - gpu=${array[1]} - IFS="|" - env=" " - fi - for autocast in ${autocast_list[*]}; do - for trainer in ${trainer_list[*]}; do - if [ ${trainer} = "pact" ]; then - run_train=${pact_trainer} - run_export=${pact_export} - elif [ ${trainer} = "fpgm" ]; then - run_train=${fpgm_trainer} - run_export=${fpgm_export} - elif [ ${trainer} = "distill" ]; then - run_train=${distill_trainer} - run_export=${distill_export} - else - run_train=${norm_trainer} - run_export=${norm_export} - fi - - if [ ${run_train} = "null" ]; then - continue - fi - if [ ${run_export} = "null" ]; then - continue - fi - - # not set autocast when autocast is null - if [ ${autocast} = "null" ]; then - set_autocast=" " - else - set_autocast="${autocast_key}=${autocast}" - fi - # not set epoch when whole_train_infer - if [ ${MODE} != "whole_train_infer" ]; then - set_epoch="${epoch_key}=${epoch_num}" - else - set_epoch=" " - fi - # set pretrain - if [ ${pretrain_model_value} != "null" ]; then - set_pretrain="${pretrain_model_key}=${pretrain_model_value}" - else - set_pretrain=" " - fi - - save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" - if [ ${#gpu} -le 2 ];then # train with cpu or single gpu - cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}" - elif [ ${#gpu} -le 15 ];then # train with multi-gpu - cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_epoch} ${set_pretrain} ${set_autocast}" - else # train with multi-machine - cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${save_model_key}=${save_log} ${set_pretrain} ${set_epoch} ${set_autocast}" - fi - # run train - eval $cmd - status_check $? "${cmd}" "${status_log}" - - # run eval - eval_cmd="${python} ${eval_py} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest" - eval $eval_cmd - status_check $? "${eval_cmd}" "${status_log}" - - # run export model - save_infer_path="${save_log}" - export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${export_weight}=${save_log}/latest ${save_infer_key}=${save_infer_path}" - eval $export_cmd - status_check $? 
"${export_cmd}" "${status_log}" - - #run inference - eval $env - save_infer_path="${save_log}" - func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" - eval "unset CUDA_VISIBLE_DEVICES" - done - done -done - -else - GPUID=$3 - if [ ${#GPUID} -le 0 ];then - env=" " - else - env="export CUDA_VISIBLE_DEVICES=${GPUID}" - fi - echo $env - #run inference - func_inference "${python}" "${inference_py}" "${infer_model}" "${LOG_PATH}" "${infer_img_dir}" -fi diff --git a/tests/ocr_det_params.txt b/tests/ocr_det_params.txt new file mode 100644 index 0000000000000000000000000000000000000000..6aff66c6aa8591c9f48c81cf857809f956a3cda2 --- /dev/null +++ b/tests/ocr_det_params.txt @@ -0,0 +1,52 @@ +===========================train_params=========================== +model_name:ocr_det +python:python3.7 +gpu_list:0|0,1 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_infer=2|whole_train_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_infer=2|whole_train_infer=4 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/icdar2015/text_localization/ch4_test_images/ +null:null +## +trainer:norm_train|pact_train +norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +pact_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c configs/det/det_mv3_db.yml -o +null:null +## +===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py +distill_export:null +export1:null +export2:null +## +infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/ +infer_export:null +infer_quant:False +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:False|True +--precision:fp32|fp16|int8 +--det_model_dir: +--image_dir:./inference/ch_det_data_50/all-sum-510/ +--save_log_path:null +--benchmark:True +null:null + diff --git a/tests/ocr_rec_params.txt b/tests/ocr_rec_params.txt new file mode 100644 index 0000000000000000000000000000000000000000..71d12f90b3bda128c3f6047c6740911dac417954 --- /dev/null +++ b/tests/ocr_rec_params.txt @@ -0,0 +1,51 @@ +===========================train_params=========================== +model_name:ocr_rec +python:python3.7 +gpu_list:0|2,3 +Global.use_gpu:True|True +Global.auto_cast:null +Global.epoch_num:lite_train_infer=2|whole_train_infer=300 +Global.save_model_dir:./output/ +Train.loader.batch_size_per_card:lite_train_infer=128|whole_train_infer=128 +Global.pretrained_model:null +train_model_name:latest +train_infer_img_dir:./train_data/ic15_data/train +null:null +## +trainer:norm_train|pact_train +norm_train:tools/train.py -c configs/rec/rec_icdar15_train.yml -o +pact_train:deploy/slim/quantization/quant.py -c configs/rec/rec_icdar15_train.yml -o +fpgm_train:null +distill_train:null +null:null +null:null +## +===========================eval_params=========================== +eval:tools/eval.py -c configs/rec/rec_icdar15_train.yml -o +null:null +## 
+===========================infer_params=========================== +Global.save_inference_dir:./output/ +Global.pretrained_model: +norm_export:tools/export_model.py -c configs/rec/rec_icdar15_train.yml -o +quant_export:deploy/slim/quantization/export_model.py -c configs/rec/rec_icdar15_train.yml -o +fpgm_export:null +distill_export:null +export1:null +export2:null +## +infer_model:./inference/ch_ppocr_mobile_v2.0_rec_infer/ +infer_export:null +infer_quant:False +inference:tools/infer/predict_rec.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:True|False +--precision:fp32|fp16|int8 +--rec_model_dir: +--image_dir:./inference/rec_inference +--save_log_path:./test/output/ +--benchmark:True +null:null diff --git a/tests/prepare.sh b/tests/prepare.sh new file mode 100644 index 0000000000000000000000000000000000000000..d27a051cb0a7effc50305db8e2268ad36492d6cb --- /dev/null +++ b/tests/prepare.sh @@ -0,0 +1,76 @@ +#!/bin/bash +FILENAME=$1 +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo ${tmp} +} +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[1]}") + +trainer_list=$(func_parser_value "${lines[14]}") + +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] +MODE=$2 + +if [ ${MODE} = "lite_train_infer" ];then + # pretrain lite train data + wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams + rm -rf ./train_data/icdar2015 + rm -rf ./train_data/ic15_data + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar # todo change to bcebos + + cd ./train_data/ && tar xf icdar2015_lite.tar && tar xf ic15_data.tar + ln -s ./icdar2015_lite ./icdar2015 + cd ../ +elif [ ${MODE} = "whole_train_infer" ];then + wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams + rm -rf ./train_data/icdar2015 + rm -rf ./train_data/ic15_data + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar + cd ./train_data/ && tar xf icdar2015.tar && tar xf ic15_data.tar && cd ../ +elif [ ${MODE} = "whole_infer" ];then + wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams + rm -rf ./train_data/icdar2015 + rm -rf ./train_data/ic15_data + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar + cd ./train_data/ && tar xf icdar2015_infer.tar && tar xf ic15_data.tar + ln -s ./icdar2015_infer ./icdar2015 + cd ../ +else + if [ ${model_name} = "ocr_det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_infer" + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + wget -nc -P ./inference 
https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ch_det_data_50.tar && cd ../ + else + rm -rf ./train_data/ic15_data + eval_model_name="ch_ppocr_mobile_v2.0_rec_infer" + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ic15_data.tar + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar + cd ./inference && tar xf ${eval_model_name}.tar && tar xf ic15_data.tar && cd ../ + fi +fi + diff --git a/tests/test.sh b/tests/test.sh new file mode 100644 index 0000000000000000000000000000000000000000..9888e0faabb13b00acdf41ad154ba0a0e7ec2b63 --- /dev/null +++ b/tests/test.sh @@ -0,0 +1,365 @@ +#!/bin/bash +FILENAME=$1 +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) + +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo ${tmp} +} +function func_set_params(){ + key=$1 + value=$2 + if [ ${key} = "null" ];then + echo " " + elif [[ ${value} = "null" ]] || [[ ${value} = " " ]] || [ ${#value} -le 0 ];then + echo " " + else + echo "${key}=${value}" + fi +} +function func_parser_params(){ + strs=$1 + IFS=":" + array=(${strs}) + key=${array[0]} + tmp=${array[1]} + IFS="|" + res="" + for _params in ${tmp[*]}; do + IFS="=" + array=(${_params}) + mode=${array[0]} + value=${array[1]} + if [[ ${mode} = ${MODE} ]]; then + IFS="|" + #echo $(func_set_params "${mode}" "${value}") + echo $value + break + fi + IFS="|" + done + echo ${res} +} +function status_check(){ + last_status=$1 # the exit code + run_command=$2 + run_log=$3 + if [ $last_status -eq 0 ]; then + echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + else + echo -e "\033[33m Run failed with command - ${run_command}! 
\033[0m" | tee -a ${run_log} + fi +} + +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[1]}") +python=$(func_parser_value "${lines[2]}") +gpu_list=$(func_parser_value "${lines[3]}") +train_use_gpu_key=$(func_parser_key "${lines[4]}") +train_use_gpu_value=$(func_parser_value "${lines[4]}") +autocast_list=$(func_parser_value "${lines[5]}") +autocast_key=$(func_parser_key "${lines[5]}") +epoch_key=$(func_parser_key "${lines[6]}") +epoch_num=$(func_parser_params "${lines[6]}") +save_model_key=$(func_parser_key "${lines[7]}") +train_batch_key=$(func_parser_key "${lines[8]}") +train_batch_value=$(func_parser_params "${lines[8]}") +pretrain_model_key=$(func_parser_key "${lines[9]}") +pretrain_model_value=$(func_parser_value "${lines[9]}") +train_model_name=$(func_parser_value "${lines[10]}") +train_infer_img_dir=$(func_parser_value "${lines[11]}") +train_param_key1=$(func_parser_key "${lines[12]}") +train_param_value1=$(func_parser_value "${lines[12]}") + +trainer_list=$(func_parser_value "${lines[14]}") +trainer_norm=$(func_parser_key "${lines[15]}") +norm_trainer=$(func_parser_value "${lines[15]}") +pact_key=$(func_parser_key "${lines[16]}") +pact_trainer=$(func_parser_value "${lines[16]}") +fpgm_key=$(func_parser_key "${lines[17]}") +fpgm_trainer=$(func_parser_value "${lines[17]}") +distill_key=$(func_parser_key "${lines[18]}") +distill_trainer=$(func_parser_value "${lines[18]}") +trainer_key1=$(func_parser_key "${lines[19]}") +trainer_value1=$(func_parser_value "${lines[19]}") +trainer_key2=$(func_parser_key "${lines[20]}") +trainer_value2=$(func_parser_value "${lines[20]}") + +eval_py=$(func_parser_value "${lines[23]}") +eval_key1=$(func_parser_key "${lines[24]}") +eval_value1=$(func_parser_value "${lines[24]}") + +save_infer_key=$(func_parser_key "${lines[27]}") +export_weight=$(func_parser_key "${lines[28]}") +norm_export=$(func_parser_value "${lines[29]}") +pact_export=$(func_parser_value "${lines[30]}") +fpgm_export=$(func_parser_value "${lines[31]}") +distill_export=$(func_parser_value "${lines[32]}") +export_key1=$(func_parser_key "${lines[33]}") +export_value1=$(func_parser_value "${lines[33]}") +export_key2=$(func_parser_key "${lines[34]}") +export_value2=$(func_parser_value "${lines[34]}") + +# parser inference model +infer_model_dir_list=$(func_parser_value "${lines[36]}") +infer_export_list=$(func_parser_value "${lines[37]}") +infer_is_quant=$(func_parser_value "${lines[38]}") +# parser inference +inference_py=$(func_parser_value "${lines[39]}") +use_gpu_key=$(func_parser_key "${lines[40]}") +use_gpu_list=$(func_parser_value "${lines[40]}") +use_mkldnn_key=$(func_parser_key "${lines[41]}") +use_mkldnn_list=$(func_parser_value "${lines[41]}") +cpu_threads_key=$(func_parser_key "${lines[42]}") +cpu_threads_list=$(func_parser_value "${lines[42]}") +batch_size_key=$(func_parser_key "${lines[43]}") +batch_size_list=$(func_parser_value "${lines[43]}") +use_trt_key=$(func_parser_key "${lines[44]}") +use_trt_list=$(func_parser_value "${lines[44]}") +precision_key=$(func_parser_key "${lines[45]}") +precision_list=$(func_parser_value "${lines[45]}") +infer_model_key=$(func_parser_key "${lines[46]}") +image_dir_key=$(func_parser_key "${lines[47]}") +infer_img_dir=$(func_parser_value "${lines[47]}") +save_log_key=$(func_parser_key "${lines[48]}") +benchmark_key=$(func_parser_key "${lines[49]}") +benchmark_value=$(func_parser_value "${lines[49]}") +infer_key1=$(func_parser_key "${lines[50]}") +infer_value1=$(func_parser_value "${lines[50]}") + 
+LOG_PATH="./tests/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results.log" + + +function func_inference(){ + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + _flag_quant=$6 + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ] || [ ${use_gpu} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then + continue + fi + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_cpu_threads=$(func_set_params "${cpu_threads_key}" "${threads}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${set_cpu_threads} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + done + done + done + elif [ ${use_gpu} = "True" ] || [ ${use_gpu} = "gpu" ]; then + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [[ ${_flag_quant} = "False" ]] && [[ ${precision} =~ "int8" ]]; then + continue + fi + if [[ ${precision} =~ "fp16" || ${precision} =~ "int8" ]] && [ ${use_trt} = "False" ]; then + continue + fi + if [[ ${use_trt} = "False" || ${precision} =~ "int8" ]] && [ ${_flag_quant} = "True" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log" + set_infer_data=$(func_set_params "${image_dir_key}" "${_img_dir}") + set_benchmark=$(func_set_params "${benchmark_key}" "${benchmark_value}") + set_batchsize=$(func_set_params "${batch_size_key}" "${batch_size}") + set_tensorrt=$(func_set_params "${use_trt_key}" "${use_trt}") + set_precision=$(func_set_params "${precision_key}" "${precision}") + set_model_dir=$(func_set_params "${infer_model_key}" "${_model_dir}") + set_infer_params1=$(func_set_params "${infer_key1}" "${infer_value1}") + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${set_tensorrt} ${set_precision} ${set_model_dir} ${set_batchsize} ${set_infer_data} ${set_benchmark} ${set_infer_params1} > ${_save_log_path} 2>&1 " + eval $command + last_status=${PIPESTATUS[0]} + eval "cat ${_save_log_path}" + status_check $last_status "${command}" "${status_log}" + + done + done + done + else + echo "Does not support hardware other than CPU and GPU Currently!" 
+ fi + done +} + +if [ ${MODE} = "infer" ]; then + GPUID=$3 + if [ ${#GPUID} -le 0 ];then + env=" " + else + env="export CUDA_VISIBLE_DEVICES=${GPUID}" + fi + # set CUDA_VISIBLE_DEVICES + eval $env + export Count=0 + IFS="|" + infer_run_exports=(${infer_export_list}) + infer_quant_flag=(${infer_is_quant}) + for infer_model in ${infer_model_dir_list[*]}; do + # run export + if [ ${infer_run_exports[Count]} != "null" ];then + save_infer_dir=$(dirname $infer_model) + set_export_weight=$(func_set_params "${export_weight}" "${infer_model}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_dir}") + export_cmd="${python} ${norm_export} ${set_export_weight} ${set_save_infer_key}" + eval $export_cmd + status_export=$? + if [ ${status_export} = 0 ];then + status_check $status_export "${export_cmd}" "${status_log}" + fi + else + save_infer_dir=${infer_model} + fi + #run inference + is_quant=${infer_quant_flag[Count]} + func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant} + Count=$(($Count + 1)) + done + +else + IFS="|" + export Count=0 + USE_GPU_KEY=(${train_use_gpu_value}) + for gpu in ${gpu_list[*]}; do + use_gpu=${USE_GPU_KEY[Count]} + Count=$(($Count + 1)) + if [ ${gpu} = "-1" ];then + env="" + elif [ ${#gpu} -le 1 ];then + env="export CUDA_VISIBLE_DEVICES=${gpu}" + eval ${env} + elif [ ${#gpu} -le 15 ];then + IFS="," + array=(${gpu}) + env="export CUDA_VISIBLE_DEVICES=${array[0]}" + IFS="|" + else + IFS=";" + array=(${gpu}) + ips=${array[0]} + gpu=${array[1]} + IFS="|" + env=" " + fi + for autocast in ${autocast_list[*]}; do + for trainer in ${trainer_list[*]}; do + flag_quant=False + if [ ${trainer} = ${pact_key} ]; then + run_train=${pact_trainer} + run_export=${pact_export} + flag_quant=True + elif [ ${trainer} = "${fpgm_key}" ]; then + run_train=${fpgm_trainer} + run_export=${fpgm_export} + elif [ ${trainer} = "${distill_key}" ]; then + run_train=${distill_trainer} + run_export=${distill_export} + elif [ ${trainer} = ${trainer_key1} ]; then + run_train=${trainer_value1} + run_export=${export_value1} + elif [[ ${trainer} = ${trainer_key2} ]]; then + run_train=${trainer_value2} + run_export=${export_value2} + else + run_train=${norm_trainer} + run_export=${norm_export} + fi + + if [ ${run_train} = "null" ]; then + continue + fi + + set_autocast=$(func_set_params "${autocast_key}" "${autocast}") + set_epoch=$(func_set_params "${epoch_key}" "${epoch_num}") + set_pretrain=$(func_set_params "${pretrain_model_key}" "${pretrain_model_value}") + set_batchsize=$(func_set_params "${train_batch_key}" "${train_batch_value}") + set_train_params1=$(func_set_params "${train_param_key1}" "${train_param_value1}") + set_use_gpu=$(func_set_params "${train_use_gpu_key}" "${use_gpu}") + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + + # load pretrain from norm training if current trainer is pact or fpgm trainer + if [ ${trainer} = ${pact_key} ] || [ ${trainer} = ${fpgm_key} ]; then + set_pretrain="${load_norm_train_model}" + fi + + set_save_model=$(func_set_params "${save_model_key}" "${save_log}") + if [ ${#gpu} -le 2 ];then # train with cpu or single gpu + cmd="${python} ${run_train} ${set_use_gpu} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} ${set_train_params1} " + elif [ ${#gpu} -le 15 ];then # train with multi-gpu + cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${set_save_model} ${set_epoch} ${set_pretrain} ${set_autocast} ${set_batchsize} 
${set_train_params1}" + else # train with multi-machine + cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${set_save_model} ${set_pretrain} ${set_epoch} ${set_autocast} ${set_batchsize} ${set_train_params1}" + fi + # run train + eval "unset CUDA_VISIBLE_DEVICES" + eval $cmd + status_check $? "${cmd}" "${status_log}" + + set_eval_pretrain=$(func_set_params "${pretrain_model_key}" "${save_log}/${train_model_name}") + # save norm trained models to set pretrain for pact training and fpgm training + if [ ${trainer} = ${trainer_norm} ]; then + load_norm_train_model=${set_eval_pretrain} + fi + # run eval + if [ ${eval_py} != "null" ]; then + set_eval_params1=$(func_set_params "${eval_key1}" "${eval_value1}") + eval_cmd="${python} ${eval_py} ${set_eval_pretrain} ${set_use_gpu} ${set_eval_params1}" + eval $eval_cmd + status_check $? "${eval_cmd}" "${status_log}" + fi + # run export model + if [ ${run_export} != "null" ]; then + # run export model + save_infer_path="${save_log}" + set_export_weight=$(func_set_params "${export_weight}" "${save_log}/${train_model_name}") + set_save_infer_key=$(func_set_params "${save_infer_key}" "${save_infer_path}") + export_cmd="${python} ${run_export} ${set_export_weight} ${set_save_infer_key}" + eval $export_cmd + status_check $? "${export_cmd}" "${status_log}" + + #run inference + eval $env + save_infer_path="${save_log}" + func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${train_infer_img_dir}" "${flag_quant}" + eval "unset CUDA_VISIBLE_DEVICES" + fi + done # done with: for trainer in ${trainer_list[*]}; do + done # done with: for autocast in ${autocast_list[*]}; do + done # done with: for gpu in ${gpu_list[*]}; do +fi # end if [ ${MODE} = "infer" ]; then diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 6a45f81e48371093edc9391bd3b8dd263df25c92..3de00d83a8f9f55af9b89d5d2cd5c877399c5930 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -106,7 +106,7 @@ class TextDetector(object): model_precision=args.precision, batch_size=1, data_shape="dynamic", - save_path=args.save_log_path, + save_path=None, inference_config=self.config, pids=pid, process_name=None, @@ -114,7 +114,8 @@ class TextDetector(object): time_keys=[ 'preprocess_time', 'inference_time', 'postprocess_time' ], - warmup=10) + warmup=2, + logger=logger) def order_points_clockwise(self, pts): """ @@ -236,7 +237,7 @@ if __name__ == "__main__": if args.warmup: img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8) - for i in range(10): + for i in range(2): res = text_detector(img) if not os.path.exists(draw_img_save): diff --git a/tools/infer/predict_rec.py b/tools/infer/predict_rec.py index bc9f713aeafb9977c60fe65bea56fbe2b395efd5..bb4a31706471b9b1745519ac9f390d01b60d5d44 100755 --- a/tools/infer/predict_rec.py +++ b/tools/infer/predict_rec.py @@ -73,7 +73,7 @@ class TextRecognizer(object): model_precision=args.precision, batch_size=args.rec_batch_num, data_shape="dynamic", - save_path=args.save_log_path, + save_path=None, #args.save_log_path, inference_config=self.config, pids=pid, process_name=None, @@ -81,7 +81,8 @@ class TextRecognizer(object): time_keys=[ 'preprocess_time', 'inference_time', 'postprocess_time' ], - warmup=10) + warmup=2, + logger=logger) def resize_norm_img(self, img, max_wh_ratio): imgC, imgH, imgW = self.rec_image_shape @@ -272,10 +273,10 @@ def main(args): valid_image_file_list = [] img_list = [] - # warmup 10 times + # warmup 2 times if args.warmup: img = 
np.random.uniform(0, 255, [32, 320, 3]).astype(np.uint8) - for i in range(10): + for i in range(2): res = text_recognizer([img]) for image_file in image_file_list: diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 28e9818ba604fbf71de356dfce23d8a02ce3d9dd..2e708d204192f62a44e8de04635fc43b2d3e29b6 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -24,6 +24,7 @@ from paddle import inference import time from ppocr.utils.logging import get_logger + def str2bool(v): return v.lower() in ("true", "t", "1") @@ -47,8 +48,8 @@ def init_args(): # DB parmas parser.add_argument("--det_db_thresh", type=float, default=0.3) - parser.add_argument("--det_db_box_thresh", type=float, default=0.5) - parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6) + parser.add_argument("--det_db_box_thresh", type=float, default=0.6) + parser.add_argument("--det_db_unclip_ratio", type=float, default=1.5) parser.add_argument("--max_batch_size", type=int, default=10) parser.add_argument("--use_dilation", type=bool, default=False) parser.add_argument("--det_db_score_mode", type=str, default="fast") @@ -168,46 +169,67 @@ def create_predictor(args, mode, logger): if mode == "det": min_input_shape = { "x": [1, 3, 50, 50], - "conv2d_92.tmp_0": [1, 96, 20, 20], - "conv2d_91.tmp_0": [1, 96, 10, 10], + "conv2d_92.tmp_0": [1, 120, 20, 20], + "conv2d_91.tmp_0": [1, 24, 10, 10], "conv2d_59.tmp_0": [1, 96, 20, 20], - "nearest_interp_v2_1.tmp_0": [1, 96, 10, 10], - "nearest_interp_v2_2.tmp_0": [1, 96, 20, 20], - "conv2d_124.tmp_0": [1, 96, 20, 20], - "nearest_interp_v2_3.tmp_0": [1, 24, 20, 20], - "nearest_interp_v2_4.tmp_0": [1, 24, 20, 20], - "nearest_interp_v2_5.tmp_0": [1, 24, 20, 20], + "nearest_interp_v2_1.tmp_0": [1, 256, 10, 10], + "nearest_interp_v2_2.tmp_0": [1, 256, 20, 20], + "conv2d_124.tmp_0": [1, 256, 20, 20], + "nearest_interp_v2_3.tmp_0": [1, 64, 20, 20], + "nearest_interp_v2_4.tmp_0": [1, 64, 20, 20], + "nearest_interp_v2_5.tmp_0": [1, 64, 20, 20], "elementwise_add_7": [1, 56, 2, 2], - "nearest_interp_v2_0.tmp_0": [1, 96, 2, 2] + "nearest_interp_v2_0.tmp_0": [1, 256, 2, 2] } max_input_shape = { "x": [1, 3, 2000, 2000], - "conv2d_92.tmp_0": [1, 96, 400, 400], - "conv2d_91.tmp_0": [1, 96, 200, 200], + "conv2d_92.tmp_0": [1, 120, 400, 400], + "conv2d_91.tmp_0": [1, 24, 200, 200], "conv2d_59.tmp_0": [1, 96, 400, 400], - "nearest_interp_v2_1.tmp_0": [1, 96, 200, 200], + "nearest_interp_v2_1.tmp_0": [1, 256, 200, 200], "conv2d_124.tmp_0": [1, 256, 400, 400], - "nearest_interp_v2_2.tmp_0": [1, 96, 400, 400], - "nearest_interp_v2_3.tmp_0": [1, 24, 400, 400], - "nearest_interp_v2_4.tmp_0": [1, 24, 400, 400], - "nearest_interp_v2_5.tmp_0": [1, 24, 400, 400], + "nearest_interp_v2_2.tmp_0": [1, 256, 400, 400], + "nearest_interp_v2_3.tmp_0": [1, 64, 400, 400], + "nearest_interp_v2_4.tmp_0": [1, 64, 400, 400], + "nearest_interp_v2_5.tmp_0": [1, 64, 400, 400], "elementwise_add_7": [1, 56, 400, 400], - "nearest_interp_v2_0.tmp_0": [1, 96, 400, 400] + "nearest_interp_v2_0.tmp_0": [1, 256, 400, 400] } opt_input_shape = { "x": [1, 3, 640, 640], - "conv2d_92.tmp_0": [1, 96, 160, 160], - "conv2d_91.tmp_0": [1, 96, 80, 80], + "conv2d_92.tmp_0": [1, 120, 160, 160], + "conv2d_91.tmp_0": [1, 24, 80, 80], "conv2d_59.tmp_0": [1, 96, 160, 160], - "nearest_interp_v2_1.tmp_0": [1, 96, 80, 80], - "nearest_interp_v2_2.tmp_0": [1, 96, 160, 160], + "nearest_interp_v2_1.tmp_0": [1, 256, 80, 80], + "nearest_interp_v2_2.tmp_0": [1, 256, 160, 160], "conv2d_124.tmp_0": [1, 256, 160, 160], - 
"nearest_interp_v2_3.tmp_0": [1, 24, 160, 160], - "nearest_interp_v2_4.tmp_0": [1, 24, 160, 160], - "nearest_interp_v2_5.tmp_0": [1, 24, 160, 160], + "nearest_interp_v2_3.tmp_0": [1, 64, 160, 160], + "nearest_interp_v2_4.tmp_0": [1, 64, 160, 160], + "nearest_interp_v2_5.tmp_0": [1, 64, 160, 160], "elementwise_add_7": [1, 56, 40, 40], - "nearest_interp_v2_0.tmp_0": [1, 96, 40, 40] + "nearest_interp_v2_0.tmp_0": [1, 256, 40, 40] + } + min_pact_shape = { + "nearest_interp_v2_26.tmp_0":[1,256,20,20], + "nearest_interp_v2_27.tmp_0":[1,64,20,20], + "nearest_interp_v2_28.tmp_0":[1,64,20,20], + "nearest_interp_v2_29.tmp_0":[1,64,20,20] + } + max_pact_shape = { + "nearest_interp_v2_26.tmp_0":[1,256,400,400], + "nearest_interp_v2_27.tmp_0":[1,64,400,400], + "nearest_interp_v2_28.tmp_0":[1,64,400,400], + "nearest_interp_v2_29.tmp_0":[1,64,400,400] + } + opt_pact_shape = { + "nearest_interp_v2_26.tmp_0":[1,256,160,160], + "nearest_interp_v2_27.tmp_0":[1,64,160,160], + "nearest_interp_v2_28.tmp_0":[1,64,160,160], + "nearest_interp_v2_29.tmp_0":[1,64,160,160] } + min_input_shape.update(min_pact_shape) + max_input_shape.update(max_pact_shape) + opt_input_shape.update(opt_pact_shape) elif mode == "rec": min_input_shape = {"x": [args.rec_batch_num, 3, 32, 10]} max_input_shape = {"x": [args.rec_batch_num, 3, 32, 2000]}