From 09e15a684a3662a41c28de3b7104fa6525d407da Mon Sep 17 00:00:00 2001 From: WenmuZhou Date: Mon, 24 Aug 2020 11:30:00 +0800 Subject: [PATCH] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E6=96=87=E6=A1=A3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- doc/doc_ch/whl.md | 110 +++++----------------------------------------- doc/doc_en/whl.md | 66 +++++----------------------- 2 files changed, 22 insertions(+), 154 deletions(-) diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md index 8715276f..1328f8c5 100644 --- a/doc/doc_ch/whl.md +++ b/doc/doc_ch/whl.md @@ -19,7 +19,7 @@ pip install dist/paddleocr-0.0.3-py3-none-any.whl * 检测+识别全流程 ```python from paddleocr import PaddleOCR, draw_ocr -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' result = ocr.ocr(img_path) for line in result: @@ -40,29 +40,7 @@ im_show.save('result.jpg') [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]] [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]] [[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]], ['(45元/每公斤,100公斤起订)', 0.9676722]] -[[[22.0, 140.0], [284.0, 140.0], [284.0, 167.0], [22.0, 167.0]], ['每瓶22元,1000瓶起订)', 0.97444016]] -[[[22.0, 174.0], [85.0, 174.0], [85.0, 198.0], [22.0, 198.0]], ['【品牌】', 0.8187138]] -[[[89.0, 176.0], [301.0, 176.0], [301.0, 196.0], [89.0, 196.0]], [':代加工方式/OEMODM', 0.9421848]] -[[[23.0, 205.0], [85.0, 205.0], [85.0, 229.0], [23.0, 229.0]], ['【品名】', 0.76008326]] -[[[88.0, 204.0], [235.0, 206.0], [235.0, 229.0], [88.0, 227.0]], [':纯臻营养护发素', 0.9633639]] -[[[23.0, 236.0], [121.0, 236.0], [121.0, 261.0], [23.0, 261.0]], ['【产品编号】', 0.84101385]] -[[[110.0, 239.0], [239.0, 239.0], [239.0, 256.0], [110.0, 256.0]], ['1:YM-X-3011', 0.8621878]] -[[[414.0, 233.0], [430.0, 233.0], [430.0, 
304.0], [414.0, 304.0]], ['ODM OEM', 0.9084018]] -[[[23.0, 268.0], [183.0, 268.0], [183.0, 292.0], [23.0, 292.0]], ['【净含量】:220ml', 0.9278281]] -[[[24.0, 301.0], [118.0, 301.0], [118.0, 321.0], [24.0, 321.0]], ['【适用人群】', 0.90901047]] -[[[127.0, 300.0], [254.0, 300.0], [254.0, 323.0], [127.0, 323.0]], [':适合所有肤质', 0.95465785]] -[[[24.0, 332.0], [117.0, 332.0], [117.0, 353.0], [24.0, 353.0]], ['【主要成分】', 0.88936955]] -[[[139.0, 332.0], [236.0, 332.0], [236.0, 352.0], [139.0, 352.0]], ['鲸蜡硬脂醇', 0.9447544]] -[[[248.0, 332.0], [345.0, 332.0], [345.0, 352.0], [248.0, 352.0]], ['燕麦B-葡聚', 0.89748293]] -[[[54.0, 363.0], [232.0, 363.0], [232.0, 383.0], [54.0, 383.0]], [' 椰油酰胺丙基甜菜碱', 0.902023]] -[[[25.0, 364.0], [64.0, 364.0], [64.0, 383.0], [25.0, 383.0]], ['糖、', 0.985203]] -[[[244.0, 363.0], [281.0, 363.0], [281.0, 382.0], [244.0, 382.0]], ['泛服', 0.44537082]] -[[[367.0, 367.0], [475.0, 367.0], [475.0, 388.0], [367.0, 388.0]], ['(成品包材)', 0.9834532]] -[[[24.0, 395.0], [120.0, 395.0], [120.0, 416.0], [24.0, 416.0]], ['【主要功能】', 0.88684446]] -[[[128.0, 397.0], [273.0, 397.0], [273.0, 414.0], [128.0, 414.0]], [':可紧致头发磷层', 0.9342501]] -[[[265.0, 395.0], [361.0, 395.0], [361.0, 415.0], [265.0, 415.0]], ['琴,从而达到', 0.8253762]] -[[[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]], ['即时持久改善头发光泽的效果,给干燥的头', 0.97785276]] -[[[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]], ['发足够的滋养', 0.9577897]] +...... 
``` 结果可视化 @@ -73,7 +51,7 @@ im_show.save('result.jpg') * 单独执行检测 ```python from paddleocr import PaddleOCR, draw_ocr -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs/11.jpg' result = ocr.ocr(img_path,rec=False) for line in result: @@ -92,29 +70,7 @@ im_show.save('result.jpg') [[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]] [[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]] [[128.0, 397.0], [273.0, 397.0], [273.0, 414.0], [128.0, 414.0]] -[[265.0, 395.0], [361.0, 395.0], [361.0, 415.0], [265.0, 415.0]] -[[24.0, 395.0], [120.0, 395.0], [120.0, 416.0], [24.0, 416.0]] -[[367.0, 367.0], [475.0, 367.0], [475.0, 388.0], [367.0, 388.0]] -[[54.0, 363.0], [232.0, 363.0], [232.0, 383.0], [54.0, 383.0]] -[[25.0, 364.0], [64.0, 364.0], [64.0, 383.0], [25.0, 383.0]] -[[244.0, 363.0], [281.0, 363.0], [281.0, 382.0], [244.0, 382.0]] -[[248.0, 332.0], [345.0, 332.0], [345.0, 352.0], [248.0, 352.0]] -[[139.0, 332.0], [236.0, 332.0], [236.0, 352.0], [139.0, 352.0]] -[[24.0, 332.0], [117.0, 332.0], [117.0, 353.0], [24.0, 353.0]] -[[127.0, 300.0], [254.0, 300.0], [254.0, 323.0], [127.0, 323.0]] -[[24.0, 301.0], [118.0, 301.0], [118.0, 321.0], [24.0, 321.0]] -[[23.0, 268.0], [183.0, 268.0], [183.0, 292.0], [23.0, 292.0]] -[[110.0, 239.0], [239.0, 239.0], [239.0, 256.0], [110.0, 256.0]] -[[23.0, 236.0], [121.0, 236.0], [121.0, 261.0], [23.0, 261.0]] -[[414.0, 233.0], [430.0, 233.0], [430.0, 304.0], [414.0, 304.0]] -[[88.0, 204.0], [235.0, 206.0], [235.0, 229.0], [88.0, 227.0]] -[[23.0, 205.0], [85.0, 205.0], [85.0, 229.0], [23.0, 229.0]] -[[89.0, 176.0], [301.0, 176.0], [301.0, 196.0], [89.0, 196.0]] -[[22.0, 174.0], [85.0, 174.0], [85.0, 198.0], [22.0, 198.0]] -[[22.0, 140.0], [284.0, 140.0], [284.0, 167.0], [22.0, 167.0]] -[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]] -[[24.0, 
80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]] -[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]] +...... ``` 结果可视化 @@ -126,7 +82,7 @@ im_show.save('result.jpg') * 单独执行识别 ```python from paddleocr import PaddleOCR -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_words/ch/word_1.jpg' result = ocr.ocr(img_path,det=False) for line in result: @@ -153,29 +109,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg [[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]], ['纯臻营养护发素', 0.964739]] [[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]], ['产品信息/参数', 0.98069626]] [[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]], ['(45元/每公斤,100公斤起订)', 0.9676722]] -[[[22.0, 140.0], [284.0, 140.0], [284.0, 167.0], [22.0, 167.0]], ['每瓶22元,1000瓶起订)', 0.97444016]] -[[[22.0, 174.0], [85.0, 174.0], [85.0, 198.0], [22.0, 198.0]], ['【品牌】', 0.8187138]] -[[[89.0, 176.0], [301.0, 176.0], [301.0, 196.0], [89.0, 196.0]], [':代加工方式/OEMODM', 0.9421848]] -[[[23.0, 205.0], [85.0, 205.0], [85.0, 229.0], [23.0, 229.0]], ['【品名】', 0.76008326]] -[[[88.0, 204.0], [235.0, 206.0], [235.0, 229.0], [88.0, 227.0]], [':纯臻营养护发素', 0.9633639]] -[[[23.0, 236.0], [121.0, 236.0], [121.0, 261.0], [23.0, 261.0]], ['【产品编号】', 0.84101385]] -[[[110.0, 239.0], [239.0, 239.0], [239.0, 256.0], [110.0, 256.0]], ['1:YM-X-3011', 0.8621878]] -[[[414.0, 233.0], [430.0, 233.0], [430.0, 304.0], [414.0, 304.0]], ['ODM OEM', 0.9084018]] -[[[23.0, 268.0], [183.0, 268.0], [183.0, 292.0], [23.0, 292.0]], ['【净含量】:220ml', 0.9278281]] -[[[24.0, 301.0], [118.0, 301.0], [118.0, 321.0], [24.0, 321.0]], ['【适用人群】', 0.90901047]] -[[[127.0, 300.0], [254.0, 300.0], [254.0, 323.0], [127.0, 323.0]], [':适合所有肤质', 0.95465785]] -[[[24.0, 332.0], [117.0, 332.0], [117.0, 353.0], [24.0, 353.0]], ['【主要成分】', 0.88936955]] -[[[139.0, 332.0], [236.0, 332.0], 
[236.0, 352.0], [139.0, 352.0]], ['鲸蜡硬脂醇', 0.9447544]] -[[[248.0, 332.0], [345.0, 332.0], [345.0, 352.0], [248.0, 352.0]], ['燕麦B-葡聚', 0.89748293]] -[[[54.0, 363.0], [232.0, 363.0], [232.0, 383.0], [54.0, 383.0]], [' 椰油酰胺丙基甜菜碱', 0.902023]] -[[[25.0, 364.0], [64.0, 364.0], [64.0, 383.0], [25.0, 383.0]], ['糖、', 0.985203]] -[[[244.0, 363.0], [281.0, 363.0], [281.0, 382.0], [244.0, 382.0]], ['泛服', 0.44537082]] -[[[367.0, 367.0], [475.0, 367.0], [475.0, 388.0], [367.0, 388.0]], ['(成品包材)', 0.9834532]] -[[[24.0, 395.0], [120.0, 395.0], [120.0, 416.0], [24.0, 416.0]], ['【主要功能】', 0.88684446]] -[[[128.0, 397.0], [273.0, 397.0], [273.0, 414.0], [128.0, 414.0]], [':可紧致头发磷层', 0.9342501]] -[[[265.0, 395.0], [361.0, 395.0], [361.0, 415.0], [265.0, 415.0]], ['琴,从而达到', 0.8253762]] -[[[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]], ['即时持久改善头发光泽的效果,给干燥的头', 0.97785276]] -[[[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]], ['发足够的滋养', 0.9577897]] +...... ``` * 单独执行检测 @@ -187,29 +121,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --rec false [[26.0, 457.0], [137.0, 457.0], [137.0, 477.0], [26.0, 477.0]] [[25.0, 425.0], [372.0, 425.0], [372.0, 448.0], [25.0, 448.0]] [[128.0, 397.0], [273.0, 397.0], [273.0, 414.0], [128.0, 414.0]] -[[265.0, 395.0], [361.0, 395.0], [361.0, 415.0], [265.0, 415.0]] -[[24.0, 395.0], [120.0, 395.0], [120.0, 416.0], [24.0, 416.0]] -[[367.0, 367.0], [475.0, 367.0], [475.0, 388.0], [367.0, 388.0]] -[[54.0, 363.0], [232.0, 363.0], [232.0, 383.0], [54.0, 383.0]] -[[25.0, 364.0], [64.0, 364.0], [64.0, 383.0], [25.0, 383.0]] -[[244.0, 363.0], [281.0, 363.0], [281.0, 382.0], [244.0, 382.0]] -[[248.0, 332.0], [345.0, 332.0], [345.0, 352.0], [248.0, 352.0]] -[[139.0, 332.0], [236.0, 332.0], [236.0, 352.0], [139.0, 352.0]] -[[24.0, 332.0], [117.0, 332.0], [117.0, 353.0], [24.0, 353.0]] -[[127.0, 300.0], [254.0, 300.0], [254.0, 323.0], [127.0, 323.0]] -[[24.0, 301.0], [118.0, 301.0], [118.0, 321.0], [24.0, 321.0]] -[[23.0, 268.0], [183.0, 
268.0], [183.0, 292.0], [23.0, 292.0]] -[[110.0, 239.0], [239.0, 239.0], [239.0, 256.0], [110.0, 256.0]] -[[23.0, 236.0], [121.0, 236.0], [121.0, 261.0], [23.0, 261.0]] -[[414.0, 233.0], [430.0, 233.0], [430.0, 304.0], [414.0, 304.0]] -[[88.0, 204.0], [235.0, 206.0], [235.0, 229.0], [88.0, 227.0]] -[[23.0, 205.0], [85.0, 205.0], [85.0, 229.0], [23.0, 229.0]] -[[89.0, 176.0], [301.0, 176.0], [301.0, 196.0], [89.0, 196.0]] -[[22.0, 174.0], [85.0, 174.0], [85.0, 198.0], [22.0, 198.0]] -[[22.0, 140.0], [284.0, 140.0], [284.0, 167.0], [22.0, 167.0]] -[[24.0, 109.0], [333.0, 109.0], [333.0, 136.0], [24.0, 136.0]] -[[24.0, 80.0], [172.0, 80.0], [172.0, 104.0], [24.0, 104.0]] -[[24.0, 36.0], [304.0, 34.0], [304.0, 72.0], [24.0, 74.0]] +...... ``` * 单独执行识别 @@ -230,7 +142,7 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --det false | gpu_mem | 初始化占用的GPU内存大小 | 8000M | | image_dir | 通过命令行调用时执行预测的图片或文件夹路径 | | | det_algorithm | 使用的检测算法类型 | DB | -| det_model_name | 有两种使用方式: 1. 检测算法名称,此名称必须在支持列表内(目前只内置了ch_det_mv3_db),传入错误参数时会显示支持的列表 2. 自己转换好的inference模型路径,此时模型路径下必须包含model和params文件。选择此方式时,需要手动指定det_algorithm的值 | ch_det_mv3_db | +| det_model_dir | 检测模型所在文件夹。传参方式有两种,1. None: 自动下载内置模型到 `~/.paddleocr/det`;2.自己转换好的inference模型路径,模型路径下必须包含model和params文件 | None | | det_max_side_len | 检测算法前向时图片长边的最大尺寸,当长边超出这个值时会将长边resize到这个大小,短边等比例缩放 | 960 | | det_db_thresh | DB模型输出预测图的二值化阈值 | 0.3 | | det_db_box_thresh | DB模型输出框的阈值,低于此值的预测框会被丢弃 | 0.5 | @@ -239,13 +151,13 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --det false | det_east_cover_thresh | EAST模型输出框的阈值,低于此值的预测框会被丢弃 | 0.1 | | det_east_nms_thresh | EAST模型输出框NMS的阈值 | 0.2 | | rec_algorithm | 使用的识别算法类型 | CRNN | -| rec_model_name | 有两种使用方式: 1. 识别算法名称,此名称必须在支持列表内(目前支持CRNN,Rosetta,STAR,RARE等算法,但是内置的只有ch_rec_mv3_crnn_enhance),传入错误参数时会显示支持的列表 2. 自己转换好的inference模型路径,此时模型路径下必须包含model和params文件。选择此方式时,需要手动指定rec_algorithm的值 | ch_rec_mv3_crnn_enhance | +| rec_model_dir | 识别模型所在文件夹。传参方式有两种,1. 
None: 自动下载内置模型到 `~/.paddleocr/rec`;2.自己转换好的inference模型路径,模型路径下必须包含model和params文件 | None | | rec_image_shape | 识别算法的输入图片尺寸 | "3,32,320" | | rec_char_type | 识别算法的字符类型,中文(ch)或英文(en) | ch | | rec_batch_num | 进行识别时,同时前向的图片数 | 30 | -| rec_char_dict_path | 识别模型字典路径,当rec_model_name使用方式2传参时需要修改为自己的路径 | | +| max_text_length | 识别算法能识别的最大文字长度 | 25 | +| rec_char_dict_path | 识别模型字典路径,当rec_model_dir使用方式2传参时需要修改为自己的字典路径 | ./ppocr/utils/ppocr_keys_v1.txt | | use_space_char | 是否识别空格 | TRUE | | enable_mkldnn | 是否启用mkldnn | FALSE | -| model_storage_directory | 下载模型保存路径 | ~/.paddleocr | | det | 前向时使用启动检测 | TRUE | | rec | 前向时是否启动识别 | TRUE | diff --git a/doc/doc_en/whl.md b/doc/doc_en/whl.md index e073e8e3..2edf2037 100644 --- a/doc/doc_en/whl.md +++ b/doc/doc_en/whl.md @@ -17,7 +17,7 @@ pip install dist/paddleocr-0.0.3-py3-none-any.whl * detection and recognition ```python from paddleocr import PaddleOCR,draw_ocr -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_en/img_12.jpg' result = ocr.ocr(img_path) for line in result: @@ -39,18 +39,7 @@ Output will be a list, each item contains bounding box, text and recognition con [[[442.0, 173.0], [1169.0, 173.0], [1169.0, 225.0], [442.0, 225.0]], ['ACKNOWLEDGEMENTS', 0.99283075]] [[[393.0, 340.0], [1207.0, 342.0], [1207.0, 389.0], [393.0, 387.0]], ['We would like to thank all the designers and', 0.9357758]] [[[399.0, 398.0], [1204.0, 398.0], [1204.0, 433.0], [399.0, 433.0]], ['contributors whohave been involved in the', 0.9592447]] -[[[395.0, 443.0], [1211.0, 443.0], [1211.0, 489.0], [395.0, 489.0]], ['production of this book;their contributions', 0.9713175]] -[[[395.0, 497.0], [1209.0, 495.0], [1209.0, 531.0], [395.0, 533.0]], ['have been indispensable to its creation.We', 0.96009934]] -[[[393.0, 545.0], [1212.0, 545.0], [1212.0, 591.0], [393.0, 591.0]], ['would also like to 
express our gratitude to al', 0.9371007]] -[[[393.0, 595.0], [1212.0, 593.0], [1212.0, 635.0], [393.0, 637.0]], ['the producers for their invaluable opinions', 0.96872145]] -[[[393.0, 645.0], [1209.0, 645.0], [1209.0, 685.0], [393.0, 685.0]], ['and assistance throughout this proiect.Andto', 0.94448787]] -[[[392.0, 697.0], [1212.0, 693.0], [1212.0, 735.0], [392.0, 739.0]], ['the many others whose names are not credited', 0.93633145]] -[[[397.0, 753.0], [689.0, 755.0], [689.0, 786.0], [397.0, 784.0]], ['buthavemades', 0.99324507]] -[[[813.0, 749.0], [1212.0, 747.0], [1212.0, 784.0], [813.0, 786.0]], ['inputin this book, we', 0.9166398]] -[[[675.0, 760.0], [799.0, 755.0], [799.0, 778.0], [675.0, 784.0]], ['speciti', 0.9063535]] -[[[393.0, 801.0], [715.0, 805.0], [715.0, 839.0], [393.0, 836.0]], ['thankyouforyoul', 0.92475533]] -[[[756.0, 812.0], [805.0, 812.0], [805.0, 830.0], [756.0, 830.0]], ['P', 0.14887337]] -[[[820.0, 803.0], [1085.0, 801.0], [1085.0, 836.0], [820.0, 838.0]], ['nuoussupport', 0.9898951]] +...... 
``` Visualization of results @@ -62,7 +51,7 @@ Visualization of results * only detection ```python from paddleocr import PaddleOCR,draw_ocr -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to download and load model into memory img_path = 'PaddleOCR/doc/imgs_en/img_12.jpg' result = ocr.ocr(img_path,rec=False) for line in result: @@ -82,18 +71,7 @@ Output will be a list, each item only contains bounding box [[756.0, 812.0], [805.0, 812.0], [805.0, 830.0], [756.0, 830.0]] [[820.0, 803.0], [1085.0, 801.0], [1085.0, 836.0], [820.0, 838.0]] [[393.0, 801.0], [715.0, 805.0], [715.0, 839.0], [393.0, 836.0]] -[[675.0, 760.0], [799.0, 755.0], [799.0, 778.0], [675.0, 784.0]] -[[397.0, 753.0], [689.0, 755.0], [689.0, 786.0], [397.0, 784.0]] -[[813.0, 749.0], [1212.0, 747.0], [1212.0, 784.0], [813.0, 786.0]] -[[392.0, 697.0], [1212.0, 693.0], [1212.0, 735.0], [392.0, 739.0]] -[[393.0, 645.0], [1209.0, 645.0], [1209.0, 685.0], [393.0, 685.0]] -[[393.0, 595.0], [1212.0, 593.0], [1212.0, 635.0], [393.0, 637.0]] -[[393.0, 545.0], [1212.0, 545.0], [1212.0, 591.0], [393.0, 591.0]] -[[395.0, 497.0], [1209.0, 495.0], [1209.0, 531.0], [395.0, 533.0]] -[[395.0, 443.0], [1211.0, 443.0], [1211.0, 489.0], [395.0, 489.0]] -[[399.0, 398.0], [1204.0, 398.0], [1204.0, 433.0], [399.0, 433.0]] -[[393.0, 340.0], [1207.0, 342.0], [1207.0, 389.0], [393.0, 387.0]] -[[442.0, 173.0], [1169.0, 173.0], [1169.0, 225.0], [442.0, 225.0]] +...... 
``` Visualization of results @@ -105,7 +83,7 @@ Visualization of results * only recognition ```python from paddleocr import PaddleOCR -ocr = PaddleOCR(model_storage_directory='./model') # need to run only once to load model into memory +ocr = PaddleOCR() # need to run only once to load model into memory img_path = 'PaddleOCR/doc/imgs_words_en/word_10.png' result = ocr.ocr(img_path,det=False) for line in result: @@ -134,18 +112,7 @@ Output will be a list, each item contains bounding box, text and recognition con [[[442.0, 173.0], [1169.0, 173.0], [1169.0, 225.0], [442.0, 225.0]], ['ACKNOWLEDGEMENTS', 0.99283075]] [[[393.0, 340.0], [1207.0, 342.0], [1207.0, 389.0], [393.0, 387.0]], ['We would like to thank all the designers and', 0.9357758]] [[[399.0, 398.0], [1204.0, 398.0], [1204.0, 433.0], [399.0, 433.0]], ['contributors whohave been involved in the', 0.9592447]] -[[[395.0, 443.0], [1211.0, 443.0], [1211.0, 489.0], [395.0, 489.0]], ['production of this book;their contributions', 0.9713175]] -[[[395.0, 497.0], [1209.0, 495.0], [1209.0, 531.0], [395.0, 533.0]], ['have been indispensable to its creation.We', 0.96009934]] -[[[393.0, 545.0], [1212.0, 545.0], [1212.0, 591.0], [393.0, 591.0]], ['would also like to express our gratitude to al', 0.9371007]] -[[[393.0, 595.0], [1212.0, 593.0], [1212.0, 635.0], [393.0, 637.0]], ['the producers for their invaluable opinions', 0.96872145]] -[[[393.0, 645.0], [1209.0, 645.0], [1209.0, 685.0], [393.0, 685.0]], ['and assistance throughout this proiect.Andto', 0.94448787]] -[[[392.0, 697.0], [1212.0, 693.0], [1212.0, 735.0], [392.0, 739.0]], ['the many others whose names are not credited', 0.93633145]] -[[[397.0, 753.0], [689.0, 755.0], [689.0, 786.0], [397.0, 784.0]], ['buthavemades', 0.99324507]] -[[[813.0, 749.0], [1212.0, 747.0], [1212.0, 784.0], [813.0, 786.0]], ['inputin this book, we', 0.9166398]] -[[[675.0, 760.0], [799.0, 755.0], [799.0, 778.0], [675.0, 784.0]], ['speciti', 0.9063535]] -[[[393.0, 801.0], [715.0, 805.0], 
[715.0, 839.0], [393.0, 836.0]], ['thankyouforyoul', 0.92475533]] -[[[756.0, 812.0], [805.0, 812.0], [805.0, 830.0], [756.0, 830.0]], ['P', 0.14887337]] -[[[820.0, 803.0], [1085.0, 801.0], [1085.0, 836.0], [820.0, 838.0]], ['nuoussupport', 0.9898951]] +...... ``` * only detection @@ -158,18 +125,7 @@ Output will be a list, each item only contains bounding box [[756.0, 812.0], [805.0, 812.0], [805.0, 830.0], [756.0, 830.0]] [[820.0, 803.0], [1085.0, 801.0], [1085.0, 836.0], [820.0, 838.0]] [[393.0, 801.0], [715.0, 805.0], [715.0, 839.0], [393.0, 836.0]] -[[675.0, 760.0], [799.0, 755.0], [799.0, 778.0], [675.0, 784.0]] -[[397.0, 753.0], [689.0, 755.0], [689.0, 786.0], [397.0, 784.0]] -[[813.0, 749.0], [1212.0, 747.0], [1212.0, 784.0], [813.0, 786.0]] -[[392.0, 697.0], [1212.0, 693.0], [1212.0, 735.0], [392.0, 739.0]] -[[393.0, 645.0], [1209.0, 645.0], [1209.0, 685.0], [393.0, 685.0]] -[[393.0, 595.0], [1212.0, 593.0], [1212.0, 635.0], [393.0, 637.0]] -[[393.0, 545.0], [1212.0, 545.0], [1212.0, 591.0], [393.0, 591.0]] -[[395.0, 497.0], [1209.0, 495.0], [1209.0, 531.0], [395.0, 533.0]] -[[395.0, 443.0], [1211.0, 443.0], [1211.0, 489.0], [395.0, 489.0]] -[[399.0, 398.0], [1204.0, 398.0], [1204.0, 433.0], [399.0, 433.0]] -[[393.0, 340.0], [1207.0, 342.0], [1207.0, 389.0], [393.0, 387.0]] -[[442.0, 173.0], [1169.0, 173.0], [1169.0, 225.0], [442.0, 225.0]] +...... ``` * only recognition @@ -190,7 +146,7 @@ Output will be a list, each item contains text and recognition confidence | gpu_mem | GPU memory size used for initialization | 8000M | | image_dir | The images path or folder path for predicting when used by the command line | | | det_algorithm | Type of detection algorithm selected | DB | -| det_model_name | There are two ways to use: 1. The name of the detection algorithm which must be in the support list(only ch_det_mv3_db is built in currently), and the supported list will be displayed when the wrong parameter is passed in. 2. 
The path of the inference model that has been converted by yourself. At this time, the model path must contains model and params files. When choosing this method, you need to give the name of det_algorithm | ch_det_mv3_db | +| det_model_dir | the text detection inference model folder. There are two ways to transfer parameters, 1. None: Automatically download the built-in model to `~/.paddleocr/det`; 2. The path of the inference model converted by yourself, the model and params files must be included in the model path | None | | det_max_side_len | The maximum size of the long side of the image. When the long side exceeds this value, the long side will be resized to this size, and the short side will be scaled proportionally | 960 | | det_db_thresh | Binarization threshold value of DB output map | 0.3 | | det_db_box_thresh | The threshold value of the DB output box. Boxes score lower than this value will be discarded | 0.5 | @@ -199,13 +155,13 @@ Output will be a list, each item contains text and recognition confidence | det_east_cover_thresh | The threshold value of the EAST output box. Boxes score lower than this value will be discarded | 0.1 | | det_east_nms_thresh | The NMS threshold value of EAST model output box | 0.2 | | rec_algorithm | Type of recognition algorithm selected | CRNN | -| rec_model_name | There are two ways to use: 1. The name of the recognition algorithm which must be in the support list(only supports CRNN, Rosetta, STAR, RARE and other algorithms currently, but only ch_rec_mv3_crnn_enhance is built-in), and the supported list will be displayed when the wrong parameter is passed in. 2. The path of the inference model that has been converted by yourself. At this time, the model path must contains model and params files. When choosing this method, you need to give the name of rec_algorithm | ch_rec_mv3_crnn_enhance | +| rec_model_dir | the text recognition inference model folder. There are two ways to transfer parameters, 1. 
None: Automatically download the built-in model to `~/.paddleocr/rec`; 2. The path of the inference model converted by yourself, the model and params files must be included in the model path | None | | rec_image_shape | image shape of recognition algorithm | "3,32,320" | | rec_char_type | Character type of recognition algorithm, Chinese (ch) or English (en) | ch | | rec_batch_num | When performing recognition, the batchsize of forward images | 30 | -| rec_char_dict_path | the alphabet path which needs to be modified to your own path when `rec_model_Name` use mode 2 | | +| max_text_length | The maximum text length that the recognition algorithm can recognize | 25 | +| rec_char_dict_path | the alphabet path which needs to be modified to your own path when `rec_model_dir` use mode 2 | ./ppocr/utils/ppocr_keys_v1.txt | | use_space_char | Whether to recognize spaces | TRUE | | enable_mkldnn | Whether to enable mkldnn | FALSE | -| model_storage_directory | Download model save path when det_model_name or rec_model_name use mode 1 | ~/.paddleocr | | det | Enable detction when `ppocr.ocr` func exec | TRUE | | rec | Enable detction when `ppocr.ocr` func exec | TRUE | -- GitLab