diff --git a/deploy/pdserving/README.md b/deploy/pdserving/README.md
index 88426ba9c508a4020af0a6203010d683cb73eba9..046aa5c74673e564592cc312c737cca04ad25dab 100644
--- a/deploy/pdserving/README.md
+++ b/deploy/pdserving/README.md
@@ -30,38 +30,32 @@ The introduction and tutorial of Paddle Serving service deployment framework ref
PaddleOCR operating environment and Paddle Serving operating environment are needed.
1. Please prepare PaddleOCR operating environment reference [link](../../doc/doc_ch/installation.md).
+ Download the corresponding paddle whl package according to the environment; it is recommended to install version 2.0.1.
+
2. The steps of PaddleServing operating environment prepare are as follows:
Install serving which used to start the service
```
- pip3 install paddle-serving-server==0.5.0 # for CPU
- pip3 install paddle-serving-server-gpu==0.5.0 # for GPU
+ pip3 install paddle-serving-server==0.6.1 # for CPU
+ pip3 install paddle-serving-server-gpu==0.6.1 # for GPU
# Other GPU environments need to confirm the environment and then choose to execute the following commands
- pip3 install paddle-serving-server-gpu==0.5.0.post9 # GPU with CUDA9.0
- pip3 install paddle-serving-server-gpu==0.5.0.post10 # GPU with CUDA10.0
- pip3 install paddle-serving-server-gpu==0.5.0.post101 # GPU with CUDA10.1 + TensorRT6
- pip3 install paddle-serving-server-gpu==0.5.0.post11 # GPU with CUDA10.1 + TensorRT7
+ pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6
+ pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7
```
3. Install the client to send requests to the service
- ```
- pip3 install paddle-serving-client==0.5.0 # for CPU
+ In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version.
+ The python3.7 version is recommended here:
- pip3 install paddle-serving-client-gpu==0.5.0 # for GPU
+ ```
+ wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
+ pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl
```
4. Install serving-app
```
- pip3 install paddle-serving-app==0.3.0
- # fix local_predict to support load dynamic model
- # find the install directoory of paddle_serving_app
- vim /usr/local/lib/python3.7/site-packages/paddle_serving_app/local_predict.py
- # replace line 85 of local_predict.py config = AnalysisConfig(model_path) with:
- if os.path.exists(os.path.join(model_path, "__params__")):
- config = AnalysisConfig(os.path.join(model_path, "__model__"), os.path.join(model_path, "__params__"))
- else:
- config = AnalysisConfig(model_path)
+ pip3 install paddle-serving-app==0.6.1
```
**note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
@@ -74,38 +68,38 @@ When using PaddleServing for service deployment, you need to convert the saved i
Firstly, download the [inference model](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15) of PPOCR
```
# Download and unzip the OCR text detection model
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
# Download and unzip the OCR text recognition model
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
```
-Then, you can use installed paddle_serving_client tool to convert inference model to server model.
+Then, you can use installed paddle_serving_client tool to convert inference model to mobile model.
```
# Detection model conversion
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_server_v2.0_det_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
- --serving_server ./ppocr_det_server_2.0_serving/ \
- --serving_client ./ppocr_det_server_2.0_client/
+ --serving_server ./ppocr_det_mobile_2.0_serving/ \
+ --serving_client ./ppocr_det_mobile_2.0_client/
# Recognition model conversion
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_server_v2.0_rec_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
- --serving_server ./ppocr_rec_server_2.0_serving/ \
- --serving_client ./ppocr_rec_server_2.0_client/
+ --serving_server ./ppocr_rec_mobile_2.0_serving/ \
+ --serving_client ./ppocr_rec_mobile_2.0_client/
```
-After the detection model is converted, there will be additional folders of `ppocr_det_server_2.0_serving` and `ppocr_det_server_2.0_client` in the current folder, with the following format:
+After the detection model is converted, there will be additional folders of `ppocr_det_mobile_2.0_serving` and `ppocr_det_mobile_2.0_client` in the current folder, with the following format:
```
-|- ppocr_det_server_2.0_serving/
+|- ppocr_det_mobile_2.0_serving/
|- __model__
|- __params__
|- serving_server_conf.prototxt
|- serving_server_conf.stream.prototxt
-|- ppocr_det_server_2.0_client
+|- ppocr_det_mobile_2.0_client
|- serving_client_conf.prototxt
|- serving_client_conf.stream.prototxt
@@ -147,6 +141,80 @@ The recognition model is the same.
After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is:
![](./imgs/results.png)
+ Adjust the number of concurrency in config.yml to get the largest QPS. Generally, the number of concurrent detection and recognition is 2:1
+
+ ```
+ det:
+ concurrency: 8
+ ...
+ rec:
+ concurrency: 4
+ ...
+ ```
+
+ Multiple service requests can be sent at the same time if necessary.
+
+ The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file.
+
+ Tested on 200 real pictures, and limited the detection long side to 960. The average QPS on T4 GPU can reach around 23:
+
+ ```
+
+ 2021-05-13 03:42:36,895 ==================== TRACER ======================
+ 2021-05-13 03:42:36,975 Op(rec):
+ 2021-05-13 03:42:36,976 in[14.472382882882883 ms]
+ 2021-05-13 03:42:36,976 prep[9.556855855855856 ms]
+ 2021-05-13 03:42:36,976 midp[59.921905405405404 ms]
+ 2021-05-13 03:42:36,976 postp[15.345945945945946 ms]
+ 2021-05-13 03:42:36,976 out[1.9921216216216215 ms]
+ 2021-05-13 03:42:36,976 idle[0.16254943864471572]
+ 2021-05-13 03:42:36,976 Op(det):
+ 2021-05-13 03:42:36,976 in[315.4468035714286 ms]
+ 2021-05-13 03:42:36,976 prep[69.5980625 ms]
+ 2021-05-13 03:42:36,976 midp[18.989535714285715 ms]
+ 2021-05-13 03:42:36,976 postp[18.857803571428573 ms]
+ 2021-05-13 03:42:36,977 out[3.1337544642857145 ms]
+ 2021-05-13 03:42:36,977 idle[0.7477961159203756]
+ 2021-05-13 03:42:36,977 DAGExecutor:
+ 2021-05-13 03:42:36,977 Query count[224]
+ 2021-05-13 03:42:36,977 QPS[22.4 q/s]
+ 2021-05-13 03:42:36,977 Succ[0.9910714285714286]
+ 2021-05-13 03:42:36,977 Error req[169, 170]
+ 2021-05-13 03:42:36,977 Latency:
+ 2021-05-13 03:42:36,977 ave[535.1678348214285 ms]
+ 2021-05-13 03:42:36,977 .50[172.651 ms]
+ 2021-05-13 03:42:36,977 .60[187.904 ms]
+ 2021-05-13 03:42:36,977 .70[245.675 ms]
+ 2021-05-13 03:42:36,977 .80[526.684 ms]
+ 2021-05-13 03:42:36,977 .90[854.596 ms]
+ 2021-05-13 03:42:36,977 .95[1722.728 ms]
+ 2021-05-13 03:42:36,977 .99[3990.292 ms]
+ 2021-05-13 03:42:36,978 Channel (server worker num[10]):
+ 2021-05-13 03:42:36,978 chl0(In: ['@DAGExecutor'], Out: ['det']) size[0/0]
+ 2021-05-13 03:42:36,979 chl1(In: ['det'], Out: ['rec']) size[6/0]
+ 2021-05-13 03:42:36,979 chl2(In: ['rec'], Out: ['@DAGExecutor']) size[0/0]
+ ```
+
+## WINDOWS Users
+
+Windows does not support Pipeline Serving, if we want to launch paddle serving on Windows, we should use Web Service, for more information please refer to [Paddle Serving for Windows Users](https://github.com/PaddlePaddle/Serving/blob/develop/doc/WINDOWS_TUTORIAL.md)
+
+
+1. Start Server
+
+```
+cd win
+python3 ocr_web_server.py gpu(for gpu user)
+or
+python3 ocr_web_server.py cpu(for cpu user)
+```
+
+2. Client Send Requests
+
+```
+python3 ocr_web_client.py
+```
+
## FAQ
**Q1**: No result return after sending the request.
diff --git a/deploy/pdserving/README_CN.md b/deploy/pdserving/README_CN.md
index 3e3f1bde0e824fe6133a1c169b9b03e614904c26..dd2ce90abf4b9c7f6d72d08529121498d1b0d40f 100644
--- a/deploy/pdserving/README_CN.md
+++ b/deploy/pdserving/README_CN.md
@@ -29,41 +29,31 @@ PaddleOCR提供2种服务部署方式:
需要准备PaddleOCR的运行环境和Paddle Serving的运行环境。
-- 准备PaddleOCR的运行环境参考[链接](../../doc/doc_ch/installation.md)
+- 准备PaddleOCR的运行环境[链接](../../doc/doc_ch/installation.md)
+ 根据环境下载对应的paddle whl包,推荐安装2.0.1版本
- 准备PaddleServing的运行环境,步骤如下
1. 安装serving,用于启动服务
```
- pip3 install paddle-serving-server==0.5.0 # for CPU
- pip3 install paddle-serving-server-gpu==0.5.0 # for GPU
+ pip3 install paddle-serving-server==0.6.1 # for CPU
+ pip3 install paddle-serving-server-gpu==0.6.1 # for GPU
# 其他GPU环境需要确认环境再选择执行如下命令
- pip3 install paddle-serving-server-gpu==0.5.0.post9 # GPU with CUDA9.0
- pip3 install paddle-serving-server-gpu==0.5.0.post10 # GPU with CUDA10.0
- pip3 install paddle-serving-server-gpu==0.5.0.post101 # GPU with CUDA10.1 + TensorRT6
- pip3 install paddle-serving-server-gpu==0.5.0.post11 # GPU with CUDA10.1 + TensorRT7
+ pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6
+ pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7
```
2. 安装client,用于向服务发送请求
- ```
- pip3 install paddle-serving-client==0.5.0 # for CPU
+ 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本:
- pip3 install paddle-serving-client-gpu==0.5.0 # for GPU
+ ```
+ wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
+ pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl
```
3. 安装serving-app
```
- pip3 install paddle-serving-app==0.3.0
- ```
- **note:** 安装0.3.0版本的serving-app后,为了能加载动态图模型,需要修改serving_app的源码,具体为:
- ```
- # 找到paddle_serving_app的安装目录,找到并编辑local_predict.py文件
- vim /usr/local/lib/python3.7/site-packages/paddle_serving_app/local_predict.py
- # 将local_predict.py 的第85行 config = AnalysisConfig(model_path) 替换为:
- if os.path.exists(os.path.join(model_path, "__params__")):
- config = AnalysisConfig(os.path.join(model_path, "__model__"), os.path.join(model_path, "__params__"))
- else:
- config = AnalysisConfig(model_path)
+ pip3 install paddle-serving-app==0.6.1
```
**Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。
@@ -76,38 +66,38 @@ PaddleOCR提供2种服务部署方式:
首先,下载PPOCR的[inference模型](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15)
```
# 下载并解压 OCR 文本检测模型
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_det_infer.tar && tar xf ch_ppocr_server_v2.0_det_infer.tar
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
# 下载并解压 OCR 文本识别模型
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_server_v2.0_rec_infer.tar && tar xf ch_ppocr_server_v2.0_rec_infer.tar
+wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
```
接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。
```
# 转换检测模型
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_server_v2.0_det_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
- --serving_server ./ppocr_det_server_2.0_serving/ \
- --serving_client ./ppocr_det_server_2.0_client/
+ --serving_server ./ppocr_det_mobile_2.0_serving/ \
+ --serving_client ./ppocr_det_mobile_2.0_client/
# 转换识别模型
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_server_v2.0_rec_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \
--model_filename inference.pdmodel \
--params_filename inference.pdiparams \
- --serving_server ./ppocr_rec_server_2.0_serving/ \
- --serving_client ./ppocr_rec_server_2.0_client/
+ --serving_server ./ppocr_rec_mobile_2.0_serving/ \
+ --serving_client ./ppocr_rec_mobile_2.0_client/
```
-检测模型转换完成后,会在当前文件夹多出`ppocr_det_server_2.0_serving` 和`ppocr_det_server_2.0_client`的文件夹,具备如下格式:
+检测模型转换完成后,会在当前文件夹多出`ppocr_det_mobile_2.0_serving` 和`ppocr_det_mobile_2.0_client`的文件夹,具备如下格式:
```
-|- ppocr_det_server_2.0_serving/
+|- ppocr_det_mobile_2.0_serving/
|- __model__
|- __params__
|- serving_server_conf.prototxt
|- serving_server_conf.stream.prototxt
-|- ppocr_det_server_2.0_client
+|- ppocr_det_mobile_2.0_client
|- serving_client_conf.prototxt
|- serving_client_conf.stream.prototxt
@@ -148,6 +138,79 @@ python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_server_v2.0_rec_in
成功运行后,模型预测的结果会打印在cmd窗口中,结果示例为:
![](./imgs/results.png)
+ 调整 config.yml 中的并发个数获得最大的QPS, 一般检测和识别的并发数为2:1
+ ```
+ det:
+ #并发数,is_thread_op=True时,为线程并发;否则为进程并发
+ concurrency: 8
+ ...
+ rec:
+ #并发数,is_thread_op=True时,为线程并发;否则为进程并发
+ concurrency: 4
+ ...
+ ```
+ 有需要的话可以同时发送多个服务请求
+
+ 预测性能数据会被自动写入 `PipelineServingLogs/pipeline.tracer` 文件中。
+
+ 在200张真实图片上测试,把检测长边限制为960。T4 GPU 上 QPS 均值可达到23左右:
+
+ ```
+ 2021-05-13 03:42:36,895 ==================== TRACER ======================
+ 2021-05-13 03:42:36,975 Op(rec):
+ 2021-05-13 03:42:36,976 in[14.472382882882883 ms]
+ 2021-05-13 03:42:36,976 prep[9.556855855855856 ms]
+ 2021-05-13 03:42:36,976 midp[59.921905405405404 ms]
+ 2021-05-13 03:42:36,976 postp[15.345945945945946 ms]
+ 2021-05-13 03:42:36,976 out[1.9921216216216215 ms]
+ 2021-05-13 03:42:36,976 idle[0.16254943864471572]
+ 2021-05-13 03:42:36,976 Op(det):
+ 2021-05-13 03:42:36,976 in[315.4468035714286 ms]
+ 2021-05-13 03:42:36,976 prep[69.5980625 ms]
+ 2021-05-13 03:42:36,976 midp[18.989535714285715 ms]
+ 2021-05-13 03:42:36,976 postp[18.857803571428573 ms]
+ 2021-05-13 03:42:36,977 out[3.1337544642857145 ms]
+ 2021-05-13 03:42:36,977 idle[0.7477961159203756]
+ 2021-05-13 03:42:36,977 DAGExecutor:
+ 2021-05-13 03:42:36,977 Query count[224]
+ 2021-05-13 03:42:36,977 QPS[22.4 q/s]
+ 2021-05-13 03:42:36,977 Succ[0.9910714285714286]
+ 2021-05-13 03:42:36,977 Error req[169, 170]
+ 2021-05-13 03:42:36,977 Latency:
+ 2021-05-13 03:42:36,977 ave[535.1678348214285 ms]
+ 2021-05-13 03:42:36,977 .50[172.651 ms]
+ 2021-05-13 03:42:36,977 .60[187.904 ms]
+ 2021-05-13 03:42:36,977 .70[245.675 ms]
+ 2021-05-13 03:42:36,977 .80[526.684 ms]
+ 2021-05-13 03:42:36,977 .90[854.596 ms]
+ 2021-05-13 03:42:36,977 .95[1722.728 ms]
+ 2021-05-13 03:42:36,977 .99[3990.292 ms]
+ 2021-05-13 03:42:36,978 Channel (server worker num[10]):
+ 2021-05-13 03:42:36,978 chl0(In: ['@DAGExecutor'], Out: ['det']) size[0/0]
+ 2021-05-13 03:42:36,979 chl1(In: ['det'], Out: ['rec']) size[6/0]
+ 2021-05-13 03:42:36,979 chl2(In: ['rec'], Out: ['@DAGExecutor']) size[0/0]
+ ```
+
+## WINDOWS用户
+
+Windows用户不能使用上述的启动方式,需要使用Web Service,详情参见[Windows平台使用Paddle Serving指导](https://github.com/PaddlePaddle/Serving/blob/develop/doc/WINDOWS_TUTORIAL_CN.md)
+
+
+1. 启动服务端程序
+
+```
+cd win
+python3 ocr_web_server.py gpu(使用gpu方式)
+或者
+python3 ocr_web_server.py cpu(使用cpu方式)
+```
+
+2. 发送服务请求
+
+```
+python3 ocr_web_client.py
+```
+
## FAQ
diff --git a/deploy/pdserving/config.yml b/deploy/pdserving/config.yml
index aef735dbfab5b314f9209a7cc91e7fd5b6fc615c..2aae922dfa12f46d1c0ebd352e8d3a7077065cf8 100644
--- a/deploy/pdserving/config.yml
+++ b/deploy/pdserving/config.yml
@@ -1,32 +1,32 @@
#rpc端口, rpc_port和http_port不允许同时为空。当rpc_port为空且http_port不为空时,会自动将rpc_port设置为http_port+1
-rpc_port: 18090
+rpc_port: 18091
#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port
-http_port: 9999
+http_port: 9998
#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcSever和DAG
##当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num
-worker_num: 20
+worker_num: 10
#build_dag_each_worker, False,框架在进程内创建一条DAG;True,框架会每个进程内创建多个独立的DAG
-build_dag_each_worker: false
+build_dag_each_worker: False
dag:
#op资源类型, True, 为线程模型;False,为进程模型
is_thread_op: False
#重试次数
- retry: 1
+ retry: 10
#使用性能分析, True,生成Timeline性能数据,对性能有一定影响;False为不使用
- use_profile: False
+ use_profile: True
tracer:
interval_s: 10
op:
det:
#并发数,is_thread_op=True时,为线程并发;否则为进程并发
- concurrency: 4
+ concurrency: 8
#当op配置没有server_endpoints时,从local_service_conf读取本地服务配置
local_service_conf:
@@ -34,18 +34,18 @@ op:
client_type: local_predictor
#det模型路径
- model_config: /paddle/serving/models/det_serving_server/ #ocr_det_model
+ model_config: ./ppocr_det_mobile_2.0_serving
#Fetch结果列表,以client_config中fetch_var的alias_name为准
fetch_list: ["save_infer_model/scale_0.tmp_1"]
#计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡
- devices: "2"
+ devices: "0"
ir_optim: True
rec:
#并发数,is_thread_op=True时,为线程并发;否则为进程并发
- concurrency: 1
+ concurrency: 4
#超时时间, 单位ms
timeout: -1
@@ -60,12 +60,12 @@ op:
client_type: local_predictor
#rec模型路径
- model_config: /paddle/serving/models/rec_serving_server/ #ocr_rec_model
+ model_config: ./ppocr_rec_mobile_2.0_serving
#Fetch结果列表,以client_config中fetch_var的alias_name为准
- fetch_list: ["save_infer_model/scale_0.tmp_1"] #["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
+ fetch_list: ["save_infer_model/scale_0.tmp_1"]
#计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡
- devices: "2"
+ devices: "0"
ir_optim: True
diff --git a/deploy/pdserving/ocr_reader.py b/deploy/pdserving/ocr_reader.py
index 95110706af13662de11ef0f668558d0dd3abcf52..3f219784fca79715d09ae9353a32d95e2e427cb6 100644
--- a/deploy/pdserving/ocr_reader.py
+++ b/deploy/pdserving/ocr_reader.py
@@ -21,7 +21,6 @@ import sys
import argparse
import string
from copy import deepcopy
-import paddle
class DetResizeForTest(object):
@@ -34,12 +33,12 @@ class DetResizeForTest(object):
elif 'limit_side_len' in kwargs:
self.limit_side_len = kwargs['limit_side_len']
self.limit_type = kwargs.get('limit_type', 'min')
- elif 'resize_long' in kwargs:
- self.resize_type = 2
- self.resize_long = kwargs.get('resize_long', 960)
- else:
+ elif 'resize_short' in kwargs:
self.limit_side_len = 736
self.limit_type = 'min'
+ else:
+ self.resize_type = 2
+ self.resize_long = kwargs.get('resize_long', 960)
def __call__(self, data):
img = deepcopy(data)
@@ -227,8 +226,6 @@ class CTCLabelDecode(BaseRecLabelDecode):
super(CTCLabelDecode, self).__init__(config)
def __call__(self, preds, label=None, *args, **kwargs):
- if isinstance(preds, paddle.Tensor):
- preds = preds.numpy()
preds_idx = preds.argmax(axis=2)
preds_prob = preds.max(axis=2)
text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
diff --git a/deploy/pdserving/pipeline_http_client.py b/deploy/pdserving/pipeline_http_client.py
index 88c4a81ea8bbed80d37b5fbfea6bf01b38f9613a..0befe2f6144d18e24fb3f72ed1d919fd8cd7d5a4 100644
--- a/deploy/pdserving/pipeline_http_client.py
+++ b/deploy/pdserving/pipeline_http_client.py
@@ -23,8 +23,8 @@ def cv2_to_base64(image):
return base64.b64encode(image).decode('utf8')
-url = "http://127.0.0.1:9999/ocr/prediction"
-test_img_dir = "../doc/imgs/"
+url = "http://127.0.0.1:9998/ocr/prediction"
+test_img_dir = "../../doc/imgs/"
for idx, img_file in enumerate(os.listdir(test_img_dir)):
with open(os.path.join(test_img_dir, img_file), 'rb') as file:
image_data1 = file.read()
@@ -36,5 +36,5 @@ for idx, img_file in enumerate(os.listdir(test_img_dir)):
r = requests.post(url=url, data=json.dumps(data))
print(r.json())
-test_img_dir = "../doc/imgs/"
+test_img_dir = "../../doc/imgs/"
print("==> total number of test imgs: ", len(os.listdir(test_img_dir)))
diff --git a/deploy/pdserving/pipeline_rpc_client.py b/deploy/pdserving/pipeline_rpc_client.py
index 7471f7ed6c1254d550bcf2c19f6ee7c610a2e20e..79f898faf37f946cdbf4a87d4d62c8b1f9d5c93b 100644
--- a/deploy/pdserving/pipeline_rpc_client.py
+++ b/deploy/pdserving/pipeline_rpc_client.py
@@ -23,7 +23,7 @@ import base64
import os
client = PipelineClient()
-client.connect(['127.0.0.1:18090'])
+client.connect(['127.0.0.1:18091'])
def cv2_to_base64(image):
@@ -39,4 +39,3 @@ for img_file in os.listdir(test_img_dir):
for i in range(1):
ret = client.predict(feed_dict={"image": image}, fetch=["res"])
print(ret)
- #print(ret)
diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py
index b47ef65d09dd7aad0e4d00ca852a5c32161ad45b..21db1e1411a8706dbbd9a22ce2ce7db8e16da5ec 100644
--- a/deploy/pdserving/web_service.py
+++ b/deploy/pdserving/web_service.py
@@ -11,10 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-try:
- from paddle_serving_server_gpu.web_service import WebService, Op
-except ImportError:
- from paddle_serving_server.web_service import WebService, Op
+from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
@@ -48,28 +45,24 @@ class DetOp(Op):
def preprocess(self, input_dicts, data_id, log_id):
(_, input_dict), = input_dicts.items()
data = base64.b64decode(input_dict["image"].encode('utf8'))
+ self.raw_im = data
data = np.fromstring(data, np.uint8)
# Note: class variables(self.var) can only be used in process op mode
im = cv2.imdecode(data, cv2.IMREAD_COLOR)
- self.im = im
self.ori_h, self.ori_w, _ = im.shape
-
- det_img = self.det_preprocess(self.im)
+ det_img = self.det_preprocess(im)
_, self.new_h, self.new_w = det_img.shape
- print("det image shape", det_img.shape)
return {"x": det_img[np.newaxis, :].copy()}, False, None, ""
def postprocess(self, input_dicts, fetch_dict, log_id):
- print("input_dicts: ", input_dicts)
det_out = fetch_dict["save_infer_model/scale_0.tmp_1"]
ratio_list = [
float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
]
dt_boxes_list = self.post_func(det_out, [ratio_list])
dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
- out_dict = {"dt_boxes": dt_boxes, "image": self.im}
+ out_dict = {"dt_boxes": dt_boxes, "image": self.raw_im}
- print("out dict", out_dict["dt_boxes"])
return out_dict, None, ""
@@ -83,35 +76,75 @@ class RecOp(Op):
def preprocess(self, input_dicts, data_id, log_id):
(_, input_dict), = input_dicts.items()
- im = input_dict["image"]
+ raw_im = input_dict["image"]
+ data = np.frombuffer(raw_im, np.uint8)
+ im = cv2.imdecode(data, cv2.IMREAD_COLOR)
dt_boxes = input_dict["dt_boxes"]
dt_boxes = self.sorted_boxes(dt_boxes)
feed_list = []
img_list = []
max_wh_ratio = 0
- for i, dtbox in enumerate(dt_boxes):
- boximg = self.get_rotate_crop_image(im, dt_boxes[i])
- img_list.append(boximg)
- h, w = boximg.shape[0:2]
- wh_ratio = w * 1.0 / h
- max_wh_ratio = max(max_wh_ratio, wh_ratio)
- _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
- max_wh_ratio).shape
-
- imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
- for id, img in enumerate(img_list):
- norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
- imgs[id] = norm_img
- print("rec image shape", imgs.shape)
- feed = {"x": imgs.copy()}
- return feed, False, None, ""
-
- def postprocess(self, input_dicts, fetch_dict, log_id):
- rec_res = self.ocr_reader.postprocess(fetch_dict, with_score=True)
- res_lst = []
- for res in rec_res:
- res_lst.append(res[0])
- res = {"res": str(res_lst)}
+ ## Many mini-batchs, the type of feed_data is list.
+ max_batch_size = 6 # len(dt_boxes)
+
+ # If max_batch_size is 0, skipping predict stage
+ if max_batch_size == 0:
+ return {}, True, None, ""
+ boxes_size = len(dt_boxes)
+ batch_size = boxes_size // max_batch_size
+ rem = boxes_size % max_batch_size
+ for bt_idx in range(0, batch_size + 1):
+ imgs = None
+ boxes_num_in_one_batch = 0
+ if bt_idx == batch_size:
+ if rem == 0:
+ continue
+ else:
+ boxes_num_in_one_batch = rem
+ elif bt_idx < batch_size:
+ boxes_num_in_one_batch = max_batch_size
+ else:
+ _LOGGER.error("batch_size error, bt_idx={}, batch_size={}".
+ format(bt_idx, batch_size))
+ break
+
+ start = bt_idx * max_batch_size
+ end = start + boxes_num_in_one_batch
+ img_list = []
+ for box_idx in range(start, end):
+ boximg = self.get_rotate_crop_image(im, dt_boxes[box_idx])
+ img_list.append(boximg)
+ h, w = boximg.shape[0:2]
+ wh_ratio = w * 1.0 / h
+ max_wh_ratio = max(max_wh_ratio, wh_ratio)
+ _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
+ max_wh_ratio).shape
+
+ imgs = np.zeros((boxes_num_in_one_batch, 3, w, h)).astype('float32')
+ for id, img in enumerate(img_list):
+ norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
+ imgs[id] = norm_img
+ feed = {"x": imgs.copy()}
+ feed_list.append(feed)
+
+ return feed_list, False, None, ""
+
+ def postprocess(self, input_dicts, fetch_data, log_id):
+ res_list = []
+ if isinstance(fetch_data, dict):
+ if len(fetch_data) > 0:
+ rec_batch_res = self.ocr_reader.postprocess(
+ fetch_data, with_score=True)
+ for res in rec_batch_res:
+ res_list.append(res[0])
+ elif isinstance(fetch_data, list):
+ for one_batch in fetch_data:
+ one_batch_res = self.ocr_reader.postprocess(
+ one_batch, with_score=True)
+ for res in one_batch_res:
+ res_list.append(res[0])
+
+ res = {"res": str(res_list)}
return res, None, ""
diff --git a/deploy/pdserving/win/ocr_reader.py b/deploy/pdserving/win/ocr_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f219784fca79715d09ae9353a32d95e2e427cb6
--- /dev/null
+++ b/deploy/pdserving/win/ocr_reader.py
@@ -0,0 +1,435 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import copy
+import numpy as np
+import math
+import re
+import sys
+import argparse
+import string
+from copy import deepcopy
+
+
+class DetResizeForTest(object):
+ def __init__(self, **kwargs):
+ super(DetResizeForTest, self).__init__()
+ self.resize_type = 0
+ if 'image_shape' in kwargs:
+ self.image_shape = kwargs['image_shape']
+ self.resize_type = 1
+ elif 'limit_side_len' in kwargs:
+ self.limit_side_len = kwargs['limit_side_len']
+ self.limit_type = kwargs.get('limit_type', 'min')
+ elif 'resize_short' in kwargs:
+ self.limit_side_len = 736
+ self.limit_type = 'min'
+ else:
+ self.resize_type = 2
+ self.resize_long = kwargs.get('resize_long', 960)
+
+ def __call__(self, data):
+ img = deepcopy(data)
+ src_h, src_w, _ = img.shape
+
+ if self.resize_type == 0:
+ img, [ratio_h, ratio_w] = self.resize_image_type0(img)
+ elif self.resize_type == 2:
+ img, [ratio_h, ratio_w] = self.resize_image_type2(img)
+ else:
+ img, [ratio_h, ratio_w] = self.resize_image_type1(img)
+
+ return img
+
+ def resize_image_type1(self, img):
+ resize_h, resize_w = self.image_shape
+ ori_h, ori_w = img.shape[:2] # (h, w, c)
+ ratio_h = float(resize_h) / ori_h
+ ratio_w = float(resize_w) / ori_w
+ img = cv2.resize(img, (int(resize_w), int(resize_h)))
+ return img, [ratio_h, ratio_w]
+
+ def resize_image_type0(self, img):
+ """
+ resize image to a size multiple of 32 which is required by the network
+ args:
+ img(array): array with shape [h, w, c]
+ return(tuple):
+ img, (ratio_h, ratio_w)
+ """
+ limit_side_len = self.limit_side_len
+ h, w, _ = img.shape
+
+ # limit the max side
+ if self.limit_type == 'max':
+ if max(h, w) > limit_side_len:
+ if h > w:
+ ratio = float(limit_side_len) / h
+ else:
+ ratio = float(limit_side_len) / w
+ else:
+ ratio = 1.
+ else:
+ if min(h, w) < limit_side_len:
+ if h < w:
+ ratio = float(limit_side_len) / h
+ else:
+ ratio = float(limit_side_len) / w
+ else:
+ ratio = 1.
+ resize_h = int(h * ratio)
+ resize_w = int(w * ratio)
+
+ resize_h = int(round(resize_h / 32) * 32)
+ resize_w = int(round(resize_w / 32) * 32)
+
+ try:
+ if int(resize_w) <= 0 or int(resize_h) <= 0:
+ return None, (None, None)
+ img = cv2.resize(img, (int(resize_w), int(resize_h)))
+ except:
+ print(img.shape, resize_w, resize_h)
+ sys.exit(0)
+ ratio_h = resize_h / float(h)
+ ratio_w = resize_w / float(w)
+ # return img, np.array([h, w])
+ return img, [ratio_h, ratio_w]
+
+ def resize_image_type2(self, img):
+ h, w, _ = img.shape
+
+ resize_w = w
+ resize_h = h
+
+ # Fix the longer side
+ if resize_h > resize_w:
+ ratio = float(self.resize_long) / resize_h
+ else:
+ ratio = float(self.resize_long) / resize_w
+
+ resize_h = int(resize_h * ratio)
+ resize_w = int(resize_w * ratio)
+
+ max_stride = 128
+ resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
+ resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
+ img = cv2.resize(img, (int(resize_w), int(resize_h)))
+ ratio_h = resize_h / float(h)
+ ratio_w = resize_w / float(w)
+
+ return img, [ratio_h, ratio_w]
+
+
+class BaseRecLabelDecode(object):
+ """ Convert between text-label and text-index """
+
+ def __init__(self, config):
+ support_character_type = [
+ 'ch', 'en', 'EN_symbol', 'french', 'german', 'japan', 'korean',
+ 'it', 'xi', 'pu', 'ru', 'ar', 'ta', 'ug', 'fa', 'ur', 'rs', 'oc',
+ 'rsc', 'bg', 'uk', 'be', 'te', 'ka', 'chinese_cht', 'hi', 'mr',
+ 'ne', 'EN'
+ ]
+ character_type = config['character_type']
+ character_dict_path = config['character_dict_path']
+ use_space_char = True
+ assert character_type in support_character_type, "Only {} are supported now but get {}".format(
+ support_character_type, character_type)
+
+ self.beg_str = "sos"
+ self.end_str = "eos"
+
+ if character_type == "en":
+ self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
+ dict_character = list(self.character_str)
+ elif character_type == "EN_symbol":
+ # same with ASTER setting (use 94 char).
+ self.character_str = string.printable[:-6]
+ dict_character = list(self.character_str)
+ elif character_type in support_character_type:
+ self.character_str = ""
+ assert character_dict_path is not None, "character_dict_path should not be None when character_type is {}".format(
+ character_type)
+ with open(character_dict_path, "rb") as fin:
+ lines = fin.readlines()
+ for line in lines:
+ line = line.decode('utf-8').strip("\n").strip("\r\n")
+ self.character_str += line
+ if use_space_char:
+ self.character_str += " "
+ dict_character = list(self.character_str)
+
+ else:
+ raise NotImplementedError
+ self.character_type = character_type
+ dict_character = self.add_special_char(dict_character)
+ self.dict = {}
+ for i, char in enumerate(dict_character):
+ self.dict[char] = i
+ self.character = dict_character
+
+ def add_special_char(self, dict_character):
+ return dict_character
+
+ def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
+ """ convert text-index into text-label. """
+ result_list = []
+ ignored_tokens = self.get_ignored_tokens()
+ batch_size = len(text_index)
+ for batch_idx in range(batch_size):
+ char_list = []
+ conf_list = []
+ for idx in range(len(text_index[batch_idx])):
+ if text_index[batch_idx][idx] in ignored_tokens:
+ continue
+ if is_remove_duplicate:
+ # only for predict
+ if idx > 0 and text_index[batch_idx][idx - 1] == text_index[
+ batch_idx][idx]:
+ continue
+ char_list.append(self.character[int(text_index[batch_idx][
+ idx])])
+ if text_prob is not None:
+ conf_list.append(text_prob[batch_idx][idx])
+ else:
+ conf_list.append(1)
+ text = ''.join(char_list)
+ result_list.append((text, np.mean(conf_list)))
+ return result_list
+
    def get_ignored_tokens(self):
        # Index 0 is reserved for the CTC blank (CTCLabelDecode.add_special_char
        # prepends 'blank' to the character list).
        return [0]  # for ctc blank
+
+
class CTCLabelDecode(BaseRecLabelDecode):
    """Convert between text-label and text-index for CTC-trained models."""

    # Removed commented-out legacy parameters (character_dict_path,
    # character_type, use_space_char); all configuration flows through
    # the `config` dict consumed by the base class.
    def __init__(self, config, **kwargs):
        super(CTCLabelDecode, self).__init__(config)

    def __call__(self, preds, label=None, *args, **kwargs):
        """Greedy-decode network output `preds` (indexed over axis 2).

        Returns the decoded text list, plus the decoded `label` when one
        is supplied.
        """
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        if label is None:
            return text
        label = self.decode(label)
        return text, label

    def add_special_char(self, dict_character):
        # Prepend the CTC blank so index 0 maps to it (see
        # get_ignored_tokens in the base class).
        dict_character = ['blank'] + dict_character
        return dict_character
+
+
class CharacterOps(object):
    """Convert between text-label and text-index (legacy helper).

    Expected config keys:
        character_type: 'en' | 'ch' | 'en_sensitive'
        loss_type: 'ctc' | 'attention'
        character_dict_path: dictionary file path (required for 'ch')
    """

    def __init__(self, config):
        self.character_type = config['character_type']
        self.loss_type = config['loss_type']
        if self.character_type == "en":
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        elif self.character_type == "ch":
            character_dict_path = config['character_dict_path']
            self.character_str = ""
            with open(character_dict_path, "rb") as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str += line
            dict_character = list(self.character_str)
        elif self.character_type == "en_sensitive":
            # same with ASTER setting (use 94 char).
            self.character_str = string.printable[:-6]
            dict_character = list(self.character_str)
        else:
            self.character_str = None
        # BUG FIX: report the unsupported *type*; the old message formatted
        # self.character_str, which is always None on this path and made the
        # error useless.
        assert self.character_str is not None, \
            "Nonsupport type of the character: {}".format(self.character_type)
        self.beg_str = "sos"
        self.end_str = "eos"
        if self.loss_type == "attention":
            # Attention decoding reserves indices 0/1 for sos/eos.
            dict_character = [self.beg_str, self.end_str] + dict_character
        self.dict = {}
        for i, char in enumerate(dict_character):
            self.dict[char] = i
        self.character = dict_character

    def encode(self, text):
        """convert text-label into text-index.
        input:
            text: text labels of each image. [batch_size]

        output:
            text: concatenated text index for CTCLoss.
                [sum(text_lengths)] = [text_index_0 + text_index_1 + ... + text_index_(n - 1)]
            length: length of each text. [batch_size]
        """
        if self.character_type == "en":
            text = text.lower()

        text_list = []
        for char in text:
            if char not in self.dict:
                # Characters outside the dictionary are silently dropped.
                continue
            text_list.append(self.dict[char])
        text = np.array(text_list)
        return text

    def decode(self, text_index, is_remove_duplicate=False):
        """Convert a single index sequence back into a text label.

        Skips sos/eos (attention) or the blank index (CTC); with
        is_remove_duplicate=True adjacent repeated indices are collapsed.
        """
        char_list = []
        char_num = self.get_char_num()

        if self.loss_type == "attention":
            beg_idx = self.get_beg_end_flag_idx("beg")
            end_idx = self.get_beg_end_flag_idx("end")
            ignored_tokens = [beg_idx, end_idx]
        else:
            # For CTC the blank follows the dictionary, i.e. index char_num.
            ignored_tokens = [char_num]

        for idx in range(len(text_index)):
            if text_index[idx] in ignored_tokens:
                continue
            if is_remove_duplicate:
                if idx > 0 and text_index[idx - 1] == text_index[idx]:
                    continue
            char_list.append(self.character[text_index[idx]])
        text = ''.join(char_list)
        return text

    def get_char_num(self):
        """Number of dictionary characters (excluding the CTC blank)."""
        return len(self.character)

    def get_beg_end_flag_idx(self, beg_or_end):
        """Return the index of the sos ('beg') or eos ('end') token.

        Only meaningful for attention loss; asserts otherwise.
        """
        if self.loss_type == "attention":
            if beg_or_end == "beg":
                idx = np.array(self.dict[self.beg_str])
            elif beg_or_end == "end":
                idx = np.array(self.dict[self.end_str])
            else:
                assert False, "Unsupport type %s in get_beg_end_flag_idx"\
                    % beg_or_end
            return idx
        else:
            err = "error in get_beg_end_flag_idx when using the loss %s"\
                % (self.loss_type)
            assert False, err
+
+
class OCRReader(object):
    """Image pre-processing and CTC post-processing for the recognizer."""

    def __init__(self,
                 algorithm="CRNN",
                 image_shape=[3, 32, 320],
                 char_type="ch",
                 batch_num=1,
                 char_dict_path="./ppocr_keys_v1.txt"):
        # image_shape is [C, H, W] for the recognition network input.
        self.rec_image_shape = image_shape
        self.character_type = char_type
        self.rec_batch_num = batch_num
        char_ops_params = {}
        char_ops_params["character_type"] = char_type
        char_ops_params["character_dict_path"] = char_dict_path
        char_ops_params['loss_type'] = 'ctc'
        self.char_ops = CharacterOps(char_ops_params)
        self.label_ops = CTCLabelDecode(char_ops_params)

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize `img` to network height, normalize to [-1, 1] and pad the
        width with zeros up to the target width. Returns a CHW float32 array.
        """
        imgC, imgH, imgW = self.rec_image_shape
        if self.character_type == "ch":
            # NOTE(review): hard-codes the model height 32 instead of imgH —
            # fine for the shipped 3x32x320 config; confirm before changing
            # image_shape.
            imgW = int(32 * max_wh_ratio)
        h = img.shape[0]
        w = img.shape[1]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype('float32')
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)

        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def preprocess(self, img_list):
        """Normalize a list of crops to a common padded width.

        NOTE(review): returns only the first normalized image
        (norm_img_batch[0]); confirm callers feed one crop at a time before
        relying on batching here.
        """
        img_num = len(img_list)
        norm_img_batch = []
        max_wh_ratio = 0
        for ino in range(img_num):
            h, w = img_list[ino].shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)

        for ino in range(img_num):
            norm_img = self.resize_norm_img(img_list[ino], max_wh_ratio)
            norm_img = norm_img[np.newaxis, :]
            norm_img_batch.append(norm_img)
        norm_img_batch = np.concatenate(norm_img_batch)
        norm_img_batch = norm_img_batch.copy()

        return norm_img_batch[0]

    def postprocess_old(self, outputs, with_score=False):
        """Decode LoD-based CTC greedy-decoder outputs (legacy fetch names)."""
        rec_res = []
        rec_idx_lod = outputs["ctc_greedy_decoder_0.tmp_0.lod"]
        rec_idx_batch = outputs["ctc_greedy_decoder_0.tmp_0"]
        if with_score:
            predict_lod = outputs["softmax_0.tmp_0.lod"]
        for rno in range(len(rec_idx_lod) - 1):
            beg = rec_idx_lod[rno]
            end = rec_idx_lod[rno + 1]
            if isinstance(rec_idx_batch, list):
                rec_idx_tmp = [x[0] for x in rec_idx_batch[beg:end]]
            else:  # nd array
                rec_idx_tmp = rec_idx_batch[beg:end, 0]
            preds_text = self.char_ops.decode(rec_idx_tmp)
            if with_score:
                beg = predict_lod[rno]
                end = predict_lod[rno + 1]
                if isinstance(outputs["softmax_0.tmp_0"], list):
                    outputs["softmax_0.tmp_0"] = np.array(outputs[
                        "softmax_0.tmp_0"]).astype(np.float32)
                probs = outputs["softmax_0.tmp_0"][beg:end, :]
                ind = np.argmax(probs, axis=1)
                blank = probs.shape[1]
                # The last class is the CTC blank; average over non-blank steps.
                valid_ind = np.where(ind != (blank - 1))[0]
                score = np.mean(probs[valid_ind, ind[valid_ind]])
                rec_res.append([preds_text, score])
            else:
                rec_res.append([preds_text])
        return rec_res

    def postprocess(self, outputs, with_score=False):
        """Decode raw softmax outputs fetched from the 2.0 inference model."""
        preds = outputs["save_infer_model/scale_0.tmp_1"]
        # Paddle tensors expose .numpy(); plain ndarrays do not. Swallow only
        # that specific failure instead of a bare `except:` that would hide
        # real errors.
        try:
            preds = preds.numpy()
        except AttributeError:
            pass
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text = self.label_ops.decode(
            preds_idx, preds_prob, is_remove_duplicate=True)
        return text
diff --git a/deploy/pdserving/win/ocr_web_client.py b/deploy/pdserving/win/ocr_web_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..64f0ab3b391a9c131d9927fe92fe4986cbccd567
--- /dev/null
+++ b/deploy/pdserving/win/ocr_web_client.py
@@ -0,0 +1,50 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# -*- coding: utf-8 -*-
+
+import requests
+import json
+import cv2
+import base64
+import os, sys
+import time
+
+
def cv2_to_base64(image):
    """Base64-encode raw image-file bytes for the JSON request payload.

    Despite the name, `image` is the raw encoded-file bytes (e.g. JPEG/PNG
    read from disk), not a decoded cv2 array; no re-encoding happens here.
    (Removed the commented-out cv2.imencode dead code.)
    """
    return base64.b64encode(image).decode('utf8')
+
+
headers = {"Content-type": "application/json"}
url = "http://127.0.0.1:9292/ocr/prediction"

# Send every image under test_img_dir to the OCR service once and print the
# JSON response. (Removed a pointless single-iteration `for i in range(1)`
# wrapper, an unused enumerate index, and a redundant re-assignment of
# test_img_dir at the bottom.)
test_img_dir = "../../../doc/imgs/"
for img_file in os.listdir(test_img_dir):
    # Read the raw encoded image bytes; the server decodes them itself.
    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
        image_data1 = file.read()

    image = cv2_to_base64(image_data1)
    data = {
        "feed": [{
            "image": image
        }],
        "fetch": ["save_infer_model/scale_0.tmp_1"]
    }
    r = requests.post(url=url, headers=headers, data=json.dumps(data))
    print(r.json())

print("==> total number of test imgs: ", len(os.listdir(test_img_dir)))
diff --git a/deploy/pdserving/win/ocr_web_server.py b/deploy/pdserving/win/ocr_web_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..1de6157574b01b6cce93c2854aea495b13adff92
--- /dev/null
+++ b/deploy/pdserving/win/ocr_web_server.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle_serving_client import Client
+import cv2
+import sys
+import numpy as np
+import os
+from paddle_serving_client import Client
+from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
+from paddle_serving_app.reader import Div, Normalize, Transpose
+from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
+from ocr_reader import OCRReader
+try:
+ from paddle_serving_server_gpu.web_service import WebService
+except ImportError:
+ from paddle_serving_server.web_service import WebService
+from paddle_serving_app.local_predict import LocalPredictor
+import time
+import re
+import base64
+
+
class OCRService(WebService):
    """Two-stage OCR web service: a locally-run DB text detector whose crops
    feed the served CRNN recognizer."""

    def init_det_debugger(self, det_model_config):
        """Build the detector preprocessing pipeline and local predictor.

        Device is chosen from sys.argv[1] ('gpu' or 'cpu').
        """
        # Resize to a multiple of 32 (max side 960), scale to [0,1],
        # ImageNet-normalize, then HWC -> CHW.
        self.det_preprocess = Sequential([
            ResizeByFactor(32, 960), Div(255),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
                (2, 0, 1))
        ])
        self.det_client = LocalPredictor()
        if sys.argv[1] == 'gpu':
            self.det_client.load_model_config(
                det_model_config, use_gpu=True, gpu_id=0)
        elif sys.argv[1] == 'cpu':
            self.det_client.load_model_config(det_model_config)
        self.ocr_reader = OCRReader(
            char_dict_path="../../../ppocr/utils/ppocr_keys_v1.txt")

    def preprocess(self, feed=[], fetch=[]):
        """Run detection locally, crop and sort the text boxes, and build the
        recognition feed/fetch for the served model."""
        data = base64.b64decode(feed[0]["image"].encode('utf8'))
        # np.fromstring is deprecated for binary input; np.frombuffer is the
        # drop-in, copy-free replacement.
        data = np.frombuffer(data, np.uint8)
        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
        ori_h, ori_w, _ = im.shape
        det_img = self.det_preprocess(im)
        _, new_h, new_w = det_img.shape
        det_img = det_img[np.newaxis, :]
        det_img = det_img.copy()
        det_out = self.det_client.predict(
            feed={"x": det_img},
            fetch=["save_infer_model/scale_0.tmp_1"],
            batch=True)
        filter_func = FilterBoxes(10, 10)
        post_func = DBPostProcess({
            "thresh": 0.3,
            "box_thresh": 0.5,
            "max_candidates": 1000,
            "unclip_ratio": 1.5,
            "min_size": 3
        })
        sorted_boxes = SortedBoxes()
        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
        dt_boxes_list = post_func(det_out["save_infer_model/scale_0.tmp_1"],
                                  [ratio_list])
        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
        dt_boxes = sorted_boxes(dt_boxes)
        get_rotate_crop_image = GetRotateCropImage()
        img_list = []
        max_wh_ratio = 0
        for i, dtbox in enumerate(dt_boxes):
            boximg = get_rotate_crop_image(im, dt_boxes[i])
            img_list.append(boximg)
            h, w = boximg.shape[0:2]
            wh_ratio = w * 1.0 / h
            max_wh_ratio = max(max_wh_ratio, wh_ratio)
        if len(img_list) == 0:
            # NOTE(review): this early exit returns 2 values while the normal
            # path returns 3 — confirm the WebService caller tolerates it.
            return [], []
        # resize_norm_img returns CHW: index 1 is the height, 2 the width
        # (the original bound these to misleadingly-named `w, h`).
        _, rec_h, rec_w = self.ocr_reader.resize_norm_img(img_list[0],
                                                          max_wh_ratio).shape
        imgs = np.zeros((len(img_list), 3, rec_h, rec_w)).astype('float32')
        for idx, img in enumerate(img_list):  # `idx` (was `id`, a builtin)
            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
            imgs[idx] = norm_img
        feed = {"x": imgs.copy()}
        fetch = ["save_infer_model/scale_0.tmp_1"]
        return feed, fetch, True

    def postprocess(self, feed={}, fetch=[], fetch_map=None):
        """Decode recognizer output into {'res': [text, ...]}; scores are
        computed but only the text is returned."""
        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
        res_lst = []
        for res in rec_res:
            res_lst.append(res[0])
        res = {"res": res_lst}
        return res
+
+
# Wire up the service: the recognition model is served through the WebService
# machinery while the detection model runs in-process via the local predictor
# configured in init_det_debugger().
ocr_service = OCRService(name="ocr")
ocr_service.load_model_config("../ppocr_rec_mobile_2.0_serving")
ocr_service.prepare_server(workdir="workdir", port=9292)
ocr_service.init_det_debugger(
    det_model_config="../ppocr_det_mobile_2.0_serving")
# sys.argv[1] selects the device ('gpu' or 'cpu'); any other value skips the
# debugger service and only the web service below is started.
if sys.argv[1] == 'gpu':
    ocr_service.set_gpus("0")
    ocr_service.run_debugger_service(gpu=True)
elif sys.argv[1] == 'cpu':
    ocr_service.run_debugger_service()
ocr_service.run_web_service()