From af0f81dfe13ea7ece96281742abb3923d792e48f Mon Sep 17 00:00:00 2001
From: MissPenguin
Date: Mon, 14 Dec 2020 11:45:26 +0000
Subject: [PATCH] remove pdserving

---
 deploy/pdserving/det_local_server.py |  79 ----------------
 deploy/pdserving/det_web_server.py   |  78 ----------------
 deploy/pdserving/ocr_local_server.py | 114 -----------------------
 deploy/pdserving/ocr_web_client.py   |  37 --------
 deploy/pdserving/ocr_web_server.py   | 105 ---------------------
 deploy/pdserving/readme.md           | 132 ---------------------------
 deploy/pdserving/rec_local_server.py |  79 ----------------
 deploy/pdserving/rec_web_server.py   |  77 ----------------
 8 files changed, 701 deletions(-)
 delete mode 100644 deploy/pdserving/det_local_server.py
 delete mode 100644 deploy/pdserving/det_web_server.py
 delete mode 100644 deploy/pdserving/ocr_local_server.py
 delete mode 100644 deploy/pdserving/ocr_web_client.py
 delete mode 100644 deploy/pdserving/ocr_web_server.py
 delete mode 100644 deploy/pdserving/readme.md
 delete mode 100644 deploy/pdserving/rec_local_server.py
 delete mode 100644 deploy/pdserving/rec_web_server.py

diff --git a/deploy/pdserving/det_local_server.py b/deploy/pdserving/det_local_server.py
deleted file mode 100644
index eb7948da..00000000
--- a/deploy/pdserving/det_local_server.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_det(self):
-        self.det_preprocess = Sequential([
-            ResizeByFactor(32, 960), Div(255),
-            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
-                (2, 0, 1))
-        ])
-        self.filter_func = FilterBoxes(10, 10)
-        self.post_func = DBPostProcess({
-            "thresh": 0.3,
-            "box_thresh": 0.5,
-            "max_candidates": 1000,
-            "unclip_ratio": 1.5,
-            "min_size": 3
-        })
-
-    def preprocess(self, feed=[], fetch=[]):
-        data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.fromstring(data, np.uint8)
-        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-        self.ori_h, self.ori_w, _ = im.shape
-        det_img = self.det_preprocess(im)
-        _, self.new_h, self.new_w = det_img.shape
-        return {"image": det_img[np.newaxis, :].copy()}, ["concat_1.tmp_0"]
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        det_out = fetch_map["concat_1.tmp_0"]
-        ratio_list = [
-            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
-        ]
-        dt_boxes_list = self.post_func(det_out, [ratio_list])
-        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
-        return {"dt_boxes": dt_boxes.tolist()}
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_det_model")
-ocr_service.init_det()
-if sys.argv[1] == 'gpu':
-    ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-    ocr_service.run_debugger_service(gpu=True)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292)
-    ocr_service.run_debugger_service()
-ocr_service.init_det()
-ocr_service.run_web_service()
diff --git a/deploy/pdserving/det_web_server.py b/deploy/pdserving/det_web_server.py
deleted file mode 100644
index 14be7413..00000000
--- a/deploy/pdserving/det_web_server.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_det(self):
-        self.det_preprocess = Sequential([
-            ResizeByFactor(32, 960), Div(255),
-            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
-                (2, 0, 1))
-        ])
-        self.filter_func = FilterBoxes(10, 10)
-        self.post_func = DBPostProcess({
-            "thresh": 0.3,
-            "box_thresh": 0.5,
-            "max_candidates": 1000,
-            "unclip_ratio": 1.5,
-            "min_size": 3
-        })
-
-    def preprocess(self, feed=[], fetch=[]):
-        data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.fromstring(data, np.uint8)
-        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-        self.ori_h, self.ori_w, _ = im.shape
-        det_img = self.det_preprocess(im)
-        _, self.new_h, self.new_w = det_img.shape
-        print(det_img)
-        return {"image": det_img}, ["concat_1.tmp_0"]
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        det_out = fetch_map["concat_1.tmp_0"]
-        ratio_list = [
-            float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
-        ]
-        dt_boxes_list = self.post_func(det_out, [ratio_list])
-        dt_boxes = self.filter_func(dt_boxes_list[0], [self.ori_h, self.ori_w])
-        return {"dt_boxes": dt_boxes.tolist()}
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_det_model")
-if sys.argv[1] == 'gpu':
-    ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-ocr_service.init_det()
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
diff --git a/deploy/pdserving/ocr_local_server.py b/deploy/pdserving/ocr_local_server.py
deleted file mode 100644
index de5b3d13..00000000
--- a/deploy/pdserving/ocr_local_server.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import OCRReader
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-from paddle_serving_app.local_predict import Debugger
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_det_debugger(self, det_model_config):
-        self.det_preprocess = Sequential([
-            ResizeByFactor(32, 960), Div(255),
-            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
-                (2, 0, 1))
-        ])
-        self.det_client = Debugger()
-        if sys.argv[1] == 'gpu':
-            self.det_client.load_model_config(
-                det_model_config, gpu=True, profile=False)
-        elif sys.argv[1] == 'cpu':
-            self.det_client.load_model_config(
-                det_model_config, gpu=False, profile=False)
-        self.ocr_reader = OCRReader()
-
-    def preprocess(self, feed=[], fetch=[]):
-        data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.fromstring(data, np.uint8)
-        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-        ori_h, ori_w, _ = im.shape
-        det_img = self.det_preprocess(im)
-        _, new_h, new_w = det_img.shape
-        det_img = det_img[np.newaxis, :]
-        det_img = det_img.copy()
-        det_out = self.det_client.predict(
-            feed={"image": det_img}, fetch=["concat_1.tmp_0"])
-        filter_func = FilterBoxes(10, 10)
-        post_func = DBPostProcess({
-            "thresh": 0.3,
-            "box_thresh": 0.5,
-            "max_candidates": 1000,
-            "unclip_ratio": 1.5,
-            "min_size": 3
-        })
-        sorted_boxes = SortedBoxes()
-        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
-        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
-        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
-        dt_boxes = sorted_boxes(dt_boxes)
-        get_rotate_crop_image = GetRotateCropImage()
-        img_list = []
-        max_wh_ratio = 0
-        for i, dtbox in enumerate(dt_boxes):
-            boximg = get_rotate_crop_image(im, dt_boxes[i])
-            img_list.append(boximg)
-            h, w = boximg.shape[0:2]
-            wh_ratio = w * 1.0 / h
-            max_wh_ratio = max(max_wh_ratio, wh_ratio)
-        if len(img_list) == 0:
-            return [], []
-        _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
-                                                  max_wh_ratio).shape
-        imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
-        for id, img in enumerate(img_list):
-            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
-            imgs[id] = norm_img
-        feed = {"image": imgs.copy()}
-        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
-        return feed, fetch
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
-        res_lst = []
-        for res in rec_res:
-            res_lst.append(res[0])
-        res = {"res": res_lst}
-        return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-ocr_service.init_det_debugger(det_model_config="ocr_det_model")
-if sys.argv[1] == 'gpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-    ocr_service.run_debugger_service(gpu=True)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-    ocr_service.run_debugger_service()
-ocr_service.run_web_service()
diff --git a/deploy/pdserving/ocr_web_client.py b/deploy/pdserving/ocr_web_client.py
deleted file mode 100644
index e2a92eb8..00000000
--- a/deploy/pdserving/ocr_web_client.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# -*- coding: utf-8 -*-
-
-import requests
-import json
-import cv2
-import base64
-import os, sys
-import time
-
-def cv2_to_base64(image):
-    #data = cv2.imencode('.jpg', image)[1]
-    return base64.b64encode(image).decode(
-        'utf8') #data.tostring()).decode('utf8')
-
-headers = {"Content-type": "application/json"}
-url = "http://127.0.0.1:9292/ocr/prediction"
-test_img_dir = "../../doc/imgs/"
-for img_file in os.listdir(test_img_dir):
-    with open(os.path.join(test_img_dir, img_file), 'rb') as file:
-        image_data1 = file.read()
-    image = cv2_to_base64(image_data1)
-    data = {"feed": [{"image": image}], "fetch": ["res"]}
-    r = requests.post(url=url, headers=headers, data=json.dumps(data))
-    print(r.json())
diff --git a/deploy/pdserving/ocr_web_server.py b/deploy/pdserving/ocr_web_server.py
deleted file mode 100644
index 6c0de446..00000000
--- a/deploy/pdserving/ocr_web_server.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import OCRReader
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_det_client(self, det_port, det_client_config):
-        self.det_preprocess = Sequential([
-            ResizeByFactor(32, 960), Div(255),
-            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), Transpose(
-                (2, 0, 1))
-        ])
-        self.det_client = Client()
-        self.det_client.load_client_config(det_client_config)
-        self.det_client.connect(["127.0.0.1:{}".format(det_port)])
-        self.ocr_reader = OCRReader()
-
-    def preprocess(self, feed=[], fetch=[]):
-        data = base64.b64decode(feed[0]["image"].encode('utf8'))
-        data = np.fromstring(data, np.uint8)
-        im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-        ori_h, ori_w, _ = im.shape
-        det_img = self.det_preprocess(im)
-        det_out = self.det_client.predict(
-            feed={"image": det_img}, fetch=["concat_1.tmp_0"])
-        _, new_h, new_w = det_img.shape
-        filter_func = FilterBoxes(10, 10)
-        post_func = DBPostProcess({
-            "thresh": 0.3,
-            "box_thresh": 0.5,
-            "max_candidates": 1000,
-            "unclip_ratio": 1.5,
-            "min_size": 3
-        })
-        sorted_boxes = SortedBoxes()
-        ratio_list = [float(new_h) / ori_h, float(new_w) / ori_w]
-        dt_boxes_list = post_func(det_out["concat_1.tmp_0"], [ratio_list])
-        dt_boxes = filter_func(dt_boxes_list[0], [ori_h, ori_w])
-        dt_boxes = sorted_boxes(dt_boxes)
-        get_rotate_crop_image = GetRotateCropImage()
-        feed_list = []
-        img_list = []
-        max_wh_ratio = 0
-        for i, dtbox in enumerate(dt_boxes):
-            boximg = get_rotate_crop_image(im, dt_boxes[i])
-            img_list.append(boximg)
-            h, w = boximg.shape[0:2]
-            wh_ratio = w * 1.0 / h
-            max_wh_ratio = max(max_wh_ratio, wh_ratio)
-        for img in img_list:
-            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
-            feed = {"image": norm_img}
-            feed_list.append(feed)
-        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
-        return feed_list, fetch
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
-        res_lst = []
-        for res in rec_res:
-            res_lst.append(res[0])
-        res = {"res": res_lst}
-        return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-if sys.argv[1] == 'gpu':
-    ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292)
-ocr_service.init_det_client(
-    det_port=9293,
-    det_client_config="ocr_det_client/serving_client_conf.prototxt")
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
diff --git a/deploy/pdserving/readme.md b/deploy/pdserving/readme.md
deleted file mode 100644
index f9ad80b8..00000000
--- a/deploy/pdserving/readme.md
+++ /dev/null
@@ -1,132 +0,0 @@
-# Paddle Serving Service Deployment (Beta)
-
-This tutorial describes, step by step, how to deploy an online PaddleOCR prediction service based on [Paddle Serving](https://github.com/PaddlePaddle/Serving).
-
-## Quick Start
-
-### 1. Prepare the environment
-First install the Paddle Serving components.
-We recommend deploying the Paddle Serving OCR service on a GPU.
-
-**CUDA version: 9.0**
-
-**cuDNN version: 7.0**
-
-**Operating system: CentOS 6 or later**
-
-**Python 3 instructions:**
-```
-# The following are beta Paddle Serving whl packages; you are welcome to try them. The official release is planned for mid August.
-# GPU users: download the server package from this link
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_server_gpu-0.3.2-py3-none-any.whl
-python -m pip install paddle_serving_server_gpu-0.3.2-py3-none-any.whl
-# CPU version: use this link
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_server-0.3.2-py3-none-any.whl
-python -m pip install paddle_serving_server-0.3.2-py3-none-any.whl
-# Client and App packages (shared by CPU and GPU) use the links below
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_client-0.3.2-cp36-none-any.whl
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_app-0.1.2-py3-none-any.whl
-python -m pip install paddle_serving_app-0.1.2-py3-none-any.whl paddle_serving_client-0.3.2-cp36-none-any.whl
-```
-
-**Python 2 instructions:**
-```
-# The following are beta Paddle Serving whl packages; you are welcome to try them. The official release is planned for mid August.
-# GPU users: download the server package from this link
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_server_gpu-0.3.2-py2-none-any.whl
-python -m pip install paddle_serving_server_gpu-0.3.2-py2-none-any.whl
-# CPU version: use this link
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_server-0.3.2-py2-none-any.whl
-python -m pip install paddle_serving_server-0.3.2-py2-none-any.whl
-
-# Client and App packages (shared by CPU and GPU) use the links below
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_app-0.1.2-py2-none-any.whl
-wget --no-check-certificate https://paddle-serving.bj.bcebos.com/others/paddle_serving_client-0.3.2-cp27-none-any.whl
-python -m pip install paddle_serving_app-0.1.2-py2-none-any.whl paddle_serving_client-0.3.2-cp27-none-any.whl
-```
-
-### 2. Model conversion
-You can use the models provided by `paddle_serving_app` by running the following commands:
-```
-python -m paddle_serving_app.package --get_model ocr_rec
-tar -xzvf ocr_rec.tar.gz
-python -m paddle_serving_app.package --get_model ocr_det
-tar -xzvf ocr_det.tar.gz
-```
-The commands above download the `db_crnn_mobile` models. If you want the larger `db_crnn_server` models instead, download the inference models and extract them first, then refer to [How to convert a saved Paddle inference model into a model deployable with Paddle Serving](https://github.com/PaddlePaddle/Serving/blob/develop/doc/INFERENCE_TO_SERVING_CN.md).
-
-Taking the `ch_rec_r34_vd_crnn` model as an example, it can be downloaded with:
-
-```
-wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar
-tar xf ch_rec_r34_vd_crnn_infer.tar
-```
-Then, following the Serving model conversion tutorial, run the following Python code:
-```
-from paddle_serving_client.io import inference_model_to_serving
-inference_model_dir = "ch_rec_r34_vd_crnn"
-serving_client_dir = "serving_client_dir"
-serving_server_dir = "serving_server_dir"
-feed_var_names, fetch_var_names = inference_model_to_serving(
-    inference_model_dir, serving_client_dir, serving_server_dir, model_filename="model", params_filename="params")
-```
-This generates the client-side and server-side model configurations in `serving_client_dir` and `serving_server_dir`.
-
-### 3. Start the service
-Depending on your needs, you can start either the `standard version` or the `fast version` of the service; the two are compared in the table below:
-
-|Version|Features|Suitable scenarios|
-|-|-|-|
-|Standard version|High stability, distributed deployment|High throughput, deployments that span data centers|
-|Fast version|Easy to deploy, fast prediction|Scenarios that require low prediction latency and fast iteration|
-
-#### Option 1. Start the standard-version service
-
-```
-# Choose either CPU or GPU startup; the following starts on CPU
-python -m paddle_serving_server.serve --model ocr_det_model --port 9293
-python ocr_web_server.py cpu
-# GPU startup
-python -m paddle_serving_server_gpu.serve --model ocr_det_model --port 9293 --gpu_id 0
-python ocr_web_server.py gpu
-```
-
-#### Option 2. Start the fast-version service
-
-```
-# Choose either CPU or GPU startup; the following starts on CPU
-python ocr_local_server.py cpu
-# GPU startup
-python ocr_local_server.py gpu
-```
-
-## Send a prediction request
-
-```
-python ocr_web_client.py
-```
-
-## Response format
-
-The result is returned in JSON format:
-```
-{u'result': {u'res': [u'\u571f\u5730\u6574\u6cbb\u4e0e\u571f\u58e4\u4fee\u590d\u7814\u7a76\u4e2d\u5fc3', u'\u534e\u5357\u519c\u4e1a\u5927\u5b661\u7d20\u56fe']}}
-```
-You can also print each line contained in the `res` field of the returned JSON:
-```
-土地整治与土壤修复研究中心
-华南农业大学1素图
-```
-
-## Customize the service logic
-
-In `ocr_web_server.py` and `ocr_local_server.py`, the `preprocess` function performs the pre-processing for the detection and recognition services, and the `postprocess` function performs the recognition post-processing; modify these functions as needed. They call the pre-/post-processing utilities for common CV models provided by the `paddle_serving_app` library.
-
-To start the Paddle Serving detection service or recognition service on its own, run the corresponding script from the table below and specify CPU or GPU as the command-line argument.
-
-| Model | Standard version | Fast version |
-| ---- | ----------------- | ------------------- |
-| Detection | det_web_server.py | det_local_server.py |
-| Recognition | rec_web_server.py | rec_local_server.py |
-
-For more information, see [Paddle Serving](https://github.com/PaddlePaddle/Serving).
diff --git a/deploy/pdserving/rec_local_server.py b/deploy/pdserving/rec_local_server.py
deleted file mode 100644
index ba021c1c..00000000
--- a/deploy/pdserving/rec_local_server.py
+++ /dev/null
@@ -1,79 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import OCRReader
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_rec(self):
-        self.ocr_reader = OCRReader()
-
-    def preprocess(self, feed=[], fetch=[]):
-        img_list = []
-        for feed_data in feed:
-            data = base64.b64decode(feed_data["image"].encode('utf8'))
-            data = np.fromstring(data, np.uint8)
-            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-            img_list.append(im)
-        max_wh_ratio = 0
-        for i, boximg in enumerate(img_list):
-            h, w = boximg.shape[0:2]
-            wh_ratio = w * 1.0 / h
-            max_wh_ratio = max(max_wh_ratio, wh_ratio)
-        _, w, h = self.ocr_reader.resize_norm_img(img_list[0],
-                                                  max_wh_ratio).shape
-        imgs = np.zeros((len(img_list), 3, w, h)).astype('float32')
-        for i, img in enumerate(img_list):
-            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
-            imgs[i] = norm_img
-        feed = {"image": imgs.copy()}
-        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
-        return feed, fetch
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
-        res_lst = []
-        for res in rec_res:
-            res_lst.append(res[0])
-        res = {"res": res_lst}
-        return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-ocr_service.init_rec()
-if sys.argv[1] == 'gpu':
-    ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-    ocr_service.run_debugger_service(gpu=True)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-    ocr_service.run_debugger_service()
-ocr_service.run_web_service()
diff --git a/deploy/pdserving/rec_web_server.py b/deploy/pdserving/rec_web_server.py
deleted file mode 100644
index 0f4e9f6d..00000000
--- a/deploy/pdserving/rec_web_server.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from paddle_serving_client import Client
-from paddle_serving_app.reader import OCRReader
-import cv2
-import sys
-import numpy as np
-import os
-from paddle_serving_client import Client
-from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
-from paddle_serving_app.reader import Div, Normalize, Transpose
-from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-if sys.argv[1] == 'gpu':
-    from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu':
-    from paddle_serving_server.web_service import WebService
-import time
-import re
-import base64
-
-
-class OCRService(WebService):
-    def init_rec(self):
-        self.ocr_reader = OCRReader()
-
-    def preprocess(self, feed=[], fetch=[]):
-        # TODO: to handle batch rec images
-        img_list = []
-        for feed_data in feed:
-            data = base64.b64decode(feed_data["image"].encode('utf8'))
-            data = np.fromstring(data, np.uint8)
-            im = cv2.imdecode(data, cv2.IMREAD_COLOR)
-            img_list.append(im)
-        feed_list = []
-        max_wh_ratio = 0
-        for i, boximg in enumerate(img_list):
-            h, w = boximg.shape[0:2]
-            wh_ratio = w * 1.0 / h
-            max_wh_ratio = max(max_wh_ratio, wh_ratio)
-        for img in img_list:
-            norm_img = self.ocr_reader.resize_norm_img(img, max_wh_ratio)
-            feed = {"image": norm_img}
-            feed_list.append(feed)
-        fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
-        return feed_list, fetch
-
-    def postprocess(self, feed={}, fetch=[], fetch_map=None):
-        rec_res = self.ocr_reader.postprocess(fetch_map, with_score=True)
-        res_lst = []
-        for res in rec_res:
-            res_lst.append(res[0])
-        res = {"res": res_lst}
-        return res
-
-
-ocr_service = OCRService(name="ocr")
-ocr_service.load_model_config("ocr_rec_model")
-ocr_service.init_rec()
-if sys.argv[1] == 'gpu':
-    ocr_service.set_gpus("0")
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-elif sys.argv[1] == 'cpu':
-    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
-ocr_service.run_rpc_service()
-ocr_service.run_web_service()
--
GitLab