diff --git a/deploy/pdserving/det_local_server.py b/deploy/pdserving/det_local_server.py
index 78970af4d1a8a89f976f48f2c29ec97732afa0ce..eb7948daadd018810997bba78367e86aa3398e31 100644
--- a/deploy/pdserving/det_local_server.py
+++ b/deploy/pdserving/det_local_server.py
@@ -23,7 +23,7 @@ from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes
 if sys.argv[1] == 'gpu':
     from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu'
+elif sys.argv[1] == 'cpu':
     from paddle_serving_server.web_service import WebService
 import time
 import re
@@ -67,11 +67,13 @@ class OCRService(WebService):
 
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_det_model")
+ocr_service.init_det()
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
     ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292)
+    ocr_service.run_debugger_service()
 ocr_service.init_det()
-ocr_service.run_debugger_service()
 ocr_service.run_web_service()
diff --git a/deploy/pdserving/ocr_local_server.py b/deploy/pdserving/ocr_local_server.py
index f7458c3036734e4bb6e554097029270e11912a3a..de5b3d13f12afd4a84c5d46625682c42f418d6bb 100644
--- a/deploy/pdserving/ocr_local_server.py
+++ b/deploy/pdserving/ocr_local_server.py
@@ -104,10 +104,11 @@ class OCRService(WebService):
 
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.prepare_server(workdir="workdir", port=9292)
 ocr_service.init_det_debugger(det_model_config="ocr_det_model")
 if sys.argv[1] == 'gpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
     ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
     ocr_service.run_debugger_service()
 ocr_service.run_web_service()
diff --git a/deploy/pdserving/readme.md b/deploy/pdserving/readme.md
index 9472e94cffcd483a85850f6e7ea9c8bc172aaf3b..f9ad80b896be0be29e3a7bb17e4aa119af81d5c4 100644
--- a/deploy/pdserving/readme.md
+++ b/deploy/pdserving/readme.md
@@ -55,6 +55,23 @@
 tar -xzvf ocr_det.tar.gz
 ```
 Running the commands above downloads the `db_crnn_mobile` model. If you want the larger `db_crnn_server` model instead, download its inference model, extract it, and then refer to [How to convert a saved Paddle inference model into a deployable Paddle Serving model](https://github.com/PaddlePaddle/Serving/blob/develop/doc/INFERENCE_TO_SERVING_CN.md).
+Taking the `ch_rec_r34_vd_crnn` model as an example, it can be downloaded as follows:
+
+```
+wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar
+tar xf ch_rec_r34_vd_crnn_infer.tar
+```
+Then, following the Serving model conversion tutorial, run the following Python code.
+```
+from paddle_serving_client.io import inference_model_to_serving
+inference_model_dir = "ch_rec_r34_vd_crnn"
+serving_client_dir = "serving_client_dir"
+serving_server_dir = "serving_server_dir"
+feed_var_names, fetch_var_names = inference_model_to_serving(
+    inference_model_dir, serving_client_dir, serving_server_dir, model_filename="model", params_filename="params")
+```
+This generates the client-side and server-side model configurations in `serving_client_dir` and `serving_server_dir`, respectively.
+
 
 ### 3. Start the service
 Depending on actual needs, you can start either the `standard version` or the `fast version` of the service; the two are compared in the table below:
diff --git a/deploy/pdserving/rec_local_server.py b/deploy/pdserving/rec_local_server.py
index fbe67aafee5c8dcae269cd4ad6f6100ed514f0b7..ba021c1cd5054071eb115b3e6e9c64cb572ff871 100644
--- a/deploy/pdserving/rec_local_server.py
+++ b/deploy/pdserving/rec_local_server.py
@@ -22,7 +22,10 @@ from paddle_serving_client import Client
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-from paddle_serving_server_gpu.web_service import WebService
+if sys.argv[1] == 'gpu':
+    from paddle_serving_server_gpu.web_service import WebService
+elif sys.argv[1] == 'cpu':
+    from paddle_serving_server.web_service import WebService
 import time
 import re
 import base64
@@ -65,8 +68,12 @@ class OCRService(WebService):
 
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.set_gpus("0")
 ocr_service.init_rec()
-ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-ocr_service.run_debugger_service()
+if sys.argv[1] == 'gpu':
+    ocr_service.set_gpus("0")
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.run_debugger_service(gpu=True)
+elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
+    ocr_service.run_debugger_service()
 ocr_service.run_web_service()
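
As a usage sketch that is not part of the patch above: once one of the patched servers is running (for example `python rec_local_server.py cpu` or `python rec_local_server.py gpu`), a request can be posted to the web service. The route follows Paddle Serving's WebService convention for the service name `ocr` and port 9292 used in these scripts; the image path `test.jpg` and the fetch key `res` are assumptions for illustration.

```
import base64
import json

import requests

# Read a local test image and base64-encode it, as the web service expects.
with open("test.jpg", "rb") as f:  # hypothetical image path
    image = base64.b64encode(f.read()).decode("utf8")

# Build the feed/fetch payload and post it to the running service.
data = {"feed": [{"image": image}], "fetch": ["res"]}  # "res" is an assumed fetch key
r = requests.post(
    "http://127.0.0.1:9292/ocr/prediction",
    headers={"Content-type": "application/json"},
    data=json.dumps(data),
)
print(r.json())
```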