diff --git a/deploy/pdserving/clas_local_server.py b/deploy/pdserving/clas_local_server.py
index 1d48adcfdb8c3543205f31b7d1345338c589ddcf..10de66ce16562af6c5b3474b3269a268d615b07a 100644
--- a/deploy/pdserving/clas_local_server.py
+++ b/deploy/pdserving/clas_local_server.py
@@ -74,6 +74,7 @@ class TextClassifierHelper(TextClassifier):
         prob_out = outputs[0]
         label_out = outputs[1]
         indices = args["indices"]
+        img_list = args["img_list"]
         cls_res = [['', 0.0]] * len(label_out)
         if len(label_out.shape) != 1:
             prob_out, label_out = label_out, prob_out
@@ -84,7 +85,7 @@ class TextClassifierHelper(TextClassifier):
             cls_res[indices[rno]] = [label, score]
             if '180' in label and score > self.cls_thresh:
                 img_list[indices[rno]] = cv2.rotate(img_list[indices[rno]], 1)
-        return args["img_list"], cls_res
+        return img_list, cls_res
 
 
 class OCRService(WebService):
diff --git a/deploy/pdserving/clas_rpc_server.py b/deploy/pdserving/clas_rpc_server.py
index 7fad61ee81361957568fbf8dd61e446106062084..9939ba45a8561e7148b72888f83abefb5d64e847 100644
--- a/deploy/pdserving/clas_rpc_server.py
+++ b/deploy/pdserving/clas_rpc_server.py
@@ -79,6 +79,7 @@ class TextClassifierHelper(TextClassifier):
         prob_out = outputs[0]
         label_out = outputs[1]
         indices = args["indices"]
+        img_list = args["img_list"]
         cls_res = [['', 0.0]] * len(label_out)
         if len(label_out.shape) != 1:
             prob_out, label_out = label_out, prob_out
@@ -89,7 +90,7 @@ class TextClassifierHelper(TextClassifier):
             cls_res[indices[rno]] = [label, score]
             if '180' in label and score > self.cls_thresh:
                 img_list[indices[rno]] = cv2.rotate(img_list[indices[rno]], 1)
-        return args["img_list"], cls_res
+        return img_list, cls_res
 
 
 class OCRService(WebService):
diff --git a/deploy/pdserving/ocr_local_server.py b/deploy/pdserving/ocr_local_server.py
index af9e72224d841d7451ef723aa207ebe0cb041ed8..27631a18720f17baad8af70b776d05459708645d 100644
--- a/deploy/pdserving/ocr_local_server.py
+++ b/deploy/pdserving/ocr_local_server.py
@@ -50,7 +50,7 @@ class TextSystemHelper(TextSystem):
         self.det_client = Debugger()
         self.det_client.load_model_config(
             global_args.det_model_dir, gpu=True, profile=False)
-        self.fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
+        self.fetch = ["save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"]
 
     def preprocess(self, img):
         feed, fetch, self.tmp_args = self.text_detector.preprocess(img)
diff --git a/deploy/pdserving/ocr_rpc_server.py b/deploy/pdserving/ocr_rpc_server.py
index 8229c141560423c8bff0dbec43877d6b1007d827..5567b225fc49d915c8150c118f9e5136c32d773d 100644
--- a/deploy/pdserving/ocr_rpc_server.py
+++ b/deploy/pdserving/ocr_rpc_server.py
@@ -43,14 +43,14 @@ class TextSystemHelper(TextSystem):
         if self.use_angle_cls:
             self.clas_client = Client()
             self.clas_client.load_client_config(
-                "ocr_clas_client/serving_client_conf.prototxt")
+                "cls_infer_client/serving_client_conf.prototxt")
             self.clas_client.connect(["127.0.0.1:9294"])
             self.text_classifier = TextClassifierHelper(args)
         self.det_client = Client()
         self.det_client.load_client_config(
-            "det_db_client/serving_client_conf.prototxt")
+            "det_infer_client/serving_client_conf.prototxt")
         self.det_client.connect(["127.0.0.1:9293"])
-        self.fetch = ["ctc_greedy_decoder_0.tmp_0", "softmax_0.tmp_0"]
+        self.fetch = ["save_infer_model/scale_0.tmp_0", "save_infer_model/scale_1.tmp_0"]
 
     def preprocess(self, img):
         feed, fetch, self.tmp_args = self.text_detector.preprocess(img)
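The clas_local_server.py / clas_rpc_server.py hunks above bind `img_list` from `args` once, so the in-place 180° rotations and the returned value refer to the same list instead of an undefined name. Below is a minimal, self-contained sketch of that postprocess logic; it is an illustration rather than PaddleOCR's actual module, and the helper name `rotate_cls_results` plus the default `label_list`/`cls_thresh` values are assumptions for the example.

```python
# Standalone sketch of the angle-classifier postprocess after the fix:
# img_list is read from the arguments up front, rotated in place for crops
# confidently classified as 180 degrees, and returned together with cls_res.
import cv2
import numpy as np


def rotate_cls_results(prob_out, label_out, indices, img_list,
                       label_list=('0', '180'), cls_thresh=0.9):
    """Label every crop and rotate the ones confidently classified as 180°."""
    cls_res = [['', 0.0]] * len(label_out)
    if len(label_out.shape) != 1:
        # If label_out is 2-D, the two model outputs arrived swapped; swap back.
        prob_out, label_out = label_out, prob_out
    for rno in range(len(label_out)):
        label_idx = int(label_out[rno])
        score = float(prob_out[rno][label_idx])
        label = label_list[label_idx]
        cls_res[indices[rno]] = [label, score]
        if '180' in label and score > cls_thresh:
            # cv2.rotate flag 1 is cv2.ROTATE_180
            img_list[indices[rno]] = cv2.rotate(img_list[indices[rno]], 1)
    return img_list, cls_res


if __name__ == '__main__':
    crops = [np.zeros((32, 100, 3), dtype=np.uint8) for _ in range(2)]
    probs = np.array([[0.05, 0.95], [0.90, 0.10]])
    labels = np.array([1, 0])
    rotated, res = rotate_cls_results(probs, labels, indices=[0, 1], img_list=crops)
    print(res)  # [['180', 0.95], ['0', 0.9]]
```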
diff --git a/doc/doc_ch/serving_inference.md b/doc/doc_ch/serving_inference.md
index f2215bcffb62205726e864170e177f0784d16094..5e27d1561fb5ca5d5295bd2a02b4f07f624f1c63 100644
--- a/doc/doc_ch/serving_inference.md
+++ b/doc/doc_ch/serving_inference.md
@@ -16,17 +16,23 @@
 **操作系统版本:Linux/Windows**
 
-**Python版本: 2.7/3.6/3.7**
+**Python版本: 2.7/3.5/3.6/3.7**
 
 **Python操作指南:**
+
+目前Serving用于OCR的部分功能还在测试当中,因此在这里我们给出[Serving latest package](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)
+大家根据自己的环境选择需要安装的whl包即可,例如以Python 3.5为例,执行下列命令
 
 ```
 #CPU/GPU版本选择一个
 #GPU版本服务端
-python -m pip install paddle_serving_server_gpu
+#CUDA 9
+python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post9-py3-none-any.whl
+#CUDA 10
+python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server_gpu-0.0.0.post10-py3-none-any.whl
 #CPU版本服务端
-python -m pip install paddle_serving_server
+python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_server-0.0.0-py3-none-any.whl
 #客户端和App包使用以下链接(CPU,GPU通用)
-python -m pip install paddle_serving_app paddle_serving_client
+python -m pip install -U https://paddle-serving.bj.bcebos.com/whl/paddle_serving_client-0.0.0-cp36-none-any.whl https://paddle-serving.bj.bcebos.com/whl/paddle_serving_app-0.0.0-py3-none-any.whl
 ```
 
 ## 二、训练模型转Serving模型
@@ -214,12 +220,12 @@ python rec_web_client.py
 ```
 #标准版,Linux用户
 #GPU用户
-python -m paddle_serving_server_gpu.serve --model det_mv_server --port 9293 --gpu_id 0
-python -m paddle_serving_server_gpu.serve --model ocr_cls_server --port 9294 --gpu_id 0
+python -m paddle_serving_server_gpu.serve --model det_infer_server --port 9293 --gpu_id 0
+python -m paddle_serving_server_gpu.serve --model cls_infer_server --port 9294 --gpu_id 0
 python ocr_rpc_server.py
 #CPU用户
-python -m paddle_serving_server.serve --model det_mv_server --port 9293
-python -m paddle_serving_server.serve --model ocr_cls_server --port 9294
+python -m paddle_serving_server.serve --model det_infer_server --port 9293
+python -m paddle_serving_server.serve --model cls_infer_server --port 9294
 python ocr_rpc_server.py
 
 #快速版,Windows/Linux用户
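After exporting the renamed Serving directories and launching the detection and classification servers as in the doc hunk above, a bare `paddle_serving_client` call is a quick way to confirm an endpoint answers. The snippet below is a hedged sketch under assumptions: the feed name `"image"`, the fetch name, and the dummy input shape are placeholders, and the authoritative variable names are the ones listed in `det_infer_client/serving_client_conf.prototxt`.

```python
# Hypothetical smoke test for the detection endpoint started with
#   python -m paddle_serving_server.serve --model det_infer_server --port 9293
# Feed/fetch variable names below are assumptions; read the real names from
# det_infer_client/serving_client_conf.prototxt before running.
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("det_infer_client/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9293"])

# Dummy 3x640x640 float input in CHW layout; a real caller would run the DB
# detector's resize/normalize preprocessing on an actual image first.
dummy = np.random.rand(3, 640, 640).astype("float32")
fetch_map = client.predict(
    feed={"image": dummy},
    fetch=["save_infer_model/scale_0.tmp_0"])
print(list(fetch_map.keys()))
```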