From 8346621bcf73a659f9aa4b6dadcc3796758f1629 Mon Sep 17 00:00:00 2001
From: WenmuZhou
Date: Mon, 24 Aug 2020 16:25:36 +0800
Subject: [PATCH] Add documentation for using custom models
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 doc/doc_ch/whl.md                | 31 +++++++++++++++++++++++++++++++
 doc/doc_en/{whl.md => whl_en.md} | 32 ++++++++++++++++++++++++++++++++
 setup.py                         |  2 +-
 3 files changed, 64 insertions(+), 1 deletion(-)
 rename doc/doc_en/{whl.md => whl_en.md} (91%)

diff --git a/doc/doc_ch/whl.md b/doc/doc_ch/whl.md
index 1328f8c5..9df1791d 100644
--- a/doc/doc_ch/whl.md
+++ b/doc/doc_ch/whl.md
@@ -134,6 +134,37 @@ paddleocr --image_dir PaddleOCR/doc/imgs_words/ch/word_1.jpg --det false
 ['韩国小馆', 0.9907421]
 ```
 
+## Custom models
+When the built-in models cannot meet your needs, you need to use your own trained models.
+First, refer to the first section of [inference.md](./inference.md) to convert your detection and recognition models to inference models, then use them as follows.
+
+### Use by code
+```python
+from paddleocr import PaddleOCR, draw_ocr
+# The detection and recognition model directories must contain the model and params files
+ocr = PaddleOCR(det_model_dir='your_det_model_dir', rec_model_dir='your_rec_model_dir')
+img_path = 'PaddleOCR/doc/imgs/11.jpg'
+result = ocr.ocr(img_path)
+for line in result:
+    print(line)
+
+# draw the result
+from PIL import Image
+image = Image.open(img_path).convert('RGB')
+boxes = [line[0] for line in result]
+txts = [line[1][0] for line in result]
+scores = [line[1][1] for line in result]
+im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc/simfang.ttf')
+im_show = Image.fromarray(im_show)
+im_show.save('result.jpg')
+```
+
+### Use by command line
+
+```bash
+paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --det_model_dir your_det_model_dir --rec_model_dir your_rec_model_dir
+```
+
 ## Parameter Description
 
 | Parameter | Description | Default value |
diff --git a/doc/doc_en/whl.md b/doc/doc_en/whl_en.md
similarity index 91%
rename from doc/doc_en/whl.md
rename to doc/doc_en/whl_en.md
index 2edf2037..3e12a9b5 100644
--- a/doc/doc_en/whl.md
+++ b/doc/doc_en/whl_en.md
@@ -138,6 +138,38 @@ Output will be a list, each item contains text and recognition confidence
 ['PAIN', 0.990372]
 ```
 
+## Use custom models
+When the built-in models cannot meet your needs, you need to use your own trained models.
+First, refer to the first section of [inference_en.md](./inference_en.md) to convert your detection and recognition models to inference models, then use them as follows.
+
+### Use by code
+
+```python
+from paddleocr import PaddleOCR, draw_ocr
+# The detection and recognition model directories must contain the model and params files
+ocr = PaddleOCR(det_model_dir='your_det_model_dir', rec_model_dir='your_rec_model_dir')
+img_path = 'PaddleOCR/doc/imgs_en/img_12.jpg'
+result = ocr.ocr(img_path)
+for line in result:
+    print(line)
+
+# draw the result
+from PIL import Image
+image = Image.open(img_path).convert('RGB')
+boxes = [line[0] for line in result]
+txts = [line[1][0] for line in result]
+scores = [line[1][1] for line in result]
+im_show = draw_ocr(image, boxes, txts, scores, font_path='/path/to/PaddleOCR/doc/simfang.ttf')
+im_show = Image.fromarray(im_show)
+im_show.save('result.jpg')
+```
+
+### Use by command line
+
+```bash
+paddleocr --image_dir PaddleOCR/doc/imgs/11.jpg --det_model_dir your_det_model_dir --rec_model_dir your_rec_model_dir
+```
+
 ## Parameter Description
 
 | Parameter | Description | Default value |
diff --git a/setup.py b/setup.py
index 28409139..7141f170 100644
--- a/setup.py
+++ b/setup.py
@@ -21,7 +21,7 @@ with open('requirments.txt', encoding="utf-8-sig") as f:
 
 
 def readme():
-    with open('doc/doc_en/whl.md', encoding="utf-8-sig") as f:
+    with open('doc/doc_en/whl_en.md', encoding="utf-8-sig") as f:
         README = f.read()
     return README
 
--
GitLab
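Note: both new doc sections assume the trained detection and recognition models have already been exported to inference format, i.e. directories containing the model and params files that det_model_dir and rec_model_dir point to. As a rough sketch only (not part of this patch), that export step, which the docs delegate to inference.md, looked roughly like the following in PaddleOCR of this era; the config files and checkpoint paths are placeholders for your own training output, and inference.md remains the authoritative reference for the exact flags.

```bash
# Sketch: export trained checkpoints to inference models (paths are placeholders).
# The output directories are what det_model_dir / rec_model_dir refer to above.
python3 tools/export_model.py -c configs/det/det_mv3_db.yml \
    -o Global.checkpoints=./output/det_db/best_accuracy \
       Global.save_inference_dir=./your_det_model_dir/
python3 tools/export_model.py -c configs/rec/rec_chinese_lite_train.yml \
    -o Global.checkpoints=./output/rec_ch/best_accuracy \
       Global.save_inference_dir=./your_rec_model_dir/
```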