diff --git a/deploy/pdserving/README.md b/deploy/pdserving/README.md
index cb2845c581d244e80ca597e0eb485a16ad369f20..c461fd5e54d3a51ad3427f83a1fca35cbe3ab2d8 100644
--- a/deploy/pdserving/README.md
+++ b/deploy/pdserving/README.md
@@ -45,63 +45,67 @@ PaddleOCR operating environment and Paddle Serving operating environment are nee
    ```

-3. Install the client to send requests to the service
-   In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version.
-   The python3.7 version is recommended here:
-   ```
-   wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
-   pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl
-   ```
-
-4. Install serving-app
-   ```
-   pip3 install paddle-serving-app==0.6.1
-   ```
+```bash
+# Install serving, which is used to start the service
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+pip3 install paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+# For a CUDA 10.1 environment, install paddle-serving-server with the following commands instead
+# wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+# pip3 install paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+
+# Install the client, which is used to send requests to the service
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.7.0-cp37-none-any.whl
+pip3 install paddle_serving_client-0.7.0-cp37-none-any.whl
+
+# Install serving-app
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_app-0.7.0-py3-none-any.whl
+pip3 install paddle_serving_app-0.7.0-py3-none-any.whl
+```
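+
+To quickly verify that all three packages landed in the same Python environment, one option (assuming the 0.7.0 wheels above expose the `paddle_serving_server`, `paddle_serving_client`, and `paddle_serving_app` modules) is:
+
+```
+# A minimal import check; the module names are assumed from the wheel names above
+python3 -c "import paddle_serving_server, paddle_serving_client, paddle_serving_app"
+```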

-   **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md).
+   **Note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Latest_Packages_CN.md).

 ## Model conversion
 When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy.

-Firstly, download the [inference model](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15) of PPOCR
+Firstly, download the [inference model](https://github.com/PaddlePaddle/PaddleOCR/blob/release/2.3/README_ch.md#pp-ocr%E7%B3%BB%E5%88%97%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8%E6%9B%B4%E6%96%B0%E4%B8%AD) of PPOCR
 ```
 # Download and unzip the OCR text detection model
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
+wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar -O ch_PP-OCRv2_det_infer.tar && tar -xf ch_PP-OCRv2_det_infer.tar
 # Download and unzip the OCR text recognition model
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
-
+wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar -O ch_PP-OCRv2_rec_infer.tar && tar -xf ch_PP-OCRv2_rec_infer.tar
 ```
-Then, you can use installed paddle_serving_client tool to convert inference model to mobile model.
+Then, you can use the installed paddle_serving_client tool to convert the downloaded inference model into a serving model that is easy to deploy on the server.
 ```
 # Detection model conversion
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_det_infer/ \
                                          --model_filename inference.pdmodel          \
                                          --params_filename inference.pdiparams       \
-                                         --serving_server ./ppocr_det_mobile_2.0_serving/ \
-                                         --serving_client ./ppocr_det_mobile_2.0_client/
+                                         --serving_server ./ppocrv2_det_serving/ \
+                                         --serving_client ./ppocrv2_det_client/
 # Recognition model conversion
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_rec_infer/ \
                                          --model_filename inference.pdmodel          \
                                          --params_filename inference.pdiparams       \
-                                         --serving_server ./ppocr_rec_mobile_2.0_serving/ \
-                                         --serving_client ./ppocr_rec_mobile_2.0_client/
+                                         --serving_server ./ppocrv2_rec_serving/ \
+                                         --serving_client ./ppocrv2_rec_client/
 ```

-After the detection model is converted, there will be additional folders of `ppocr_det_mobile_2.0_serving` and `ppocr_det_mobile_2.0_client` in the current folder, with the following format:
+After the detection model is converted, there will be additional folders of `ppocrv2_det_serving` and `ppocrv2_det_client` in the current folder, with the following format:
 ```
-|- ppocr_det_mobile_2.0_serving/
-  |- __model__
-  |- __params__
-  |- serving_server_conf.prototxt
-  |- serving_server_conf.stream.prototxt
-
-|- ppocr_det_mobile_2.0_client
-  |- serving_client_conf.prototxt
-  |- serving_client_conf.stream.prototxt
+|- ppocrv2_det_serving/
+  |- __model__
+  |- __params__
+  |- serving_server_conf.prototxt
+  |- serving_server_conf.stream.prototxt
+
+|- ppocrv2_det_client
+  |- serving_client_conf.prototxt
+  |- serving_client_conf.stream.prototxt
 ```
 The recognition model is the same.
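+
+The generated `serving_client_conf.prototxt` records the model's input/output variable names; the `fetch_list` entries used later in `config.yml` must match the `alias_name` values recorded there. A minimal check, assuming the output paths above:
+
+```
+# Print the feed/fetch variables recorded during conversion
+python3 -c "print(open('ppocrv2_det_client/serving_client_conf.prototxt').read())"
+```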
diff --git a/deploy/pdserving/README_CN.md b/deploy/pdserving/README_CN.md
index 067be8bbda10d971b709afdf822aea96a979d000..00024639b0b108225a0835499f62174b6618ae47 100644
--- a/deploy/pdserving/README_CN.md
+++ b/deploy/pdserving/README_CN.md
@@ -34,70 +34,66 @@ PaddleOCR提供2种服务部署方式:
 - 准备PaddleServing的运行环境,步骤如下

-1. 安装serving,用于启动服务
-   ```
-   pip3 install paddle-serving-server==0.6.1 # for CPU
-   pip3 install paddle-serving-server-gpu==0.6.1 # for GPU
-   # 其他GPU环境需要确认环境再选择执行如下命令
-   pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6
-   pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7
-   ```
-
-2. 安装client,用于向服务发送请求
-   在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本:
-
-   ```
-   wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl
-   pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl
-   ```
-
-3. 安装serving-app
-   ```
-   pip3 install paddle-serving-app==0.6.1
-   ```
+```bash
+# 安装serving,用于启动服务
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+pip3 install paddle_serving_server_gpu-0.7.0.post102-py3-none-any.whl
+# 如果是cuda10.1环境,可以使用下面的命令安装paddle-serving-server
+# wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+# pip3 install paddle_serving_server_gpu-0.7.0.post101-py3-none-any.whl
+
+# 安装client,用于向服务发送请求
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.7.0-cp37-none-any.whl
+pip3 install paddle_serving_client-0.7.0-cp37-none-any.whl
+
+# 安装serving-app
+wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_app-0.7.0-py3-none-any.whl
+pip3 install paddle_serving_app-0.7.0-py3-none-any.whl
+```

-   **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。
+**Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/v0.7.0/doc/Latest_Packages_CN.md)。

 ## 模型转换
 使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。

-首先,下载PPOCR的[inference模型](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-20-series-model-listupdate-on-dec-15)
-```
+首先,下载PPOCR的[inference模型](https://github.com/PaddlePaddle/PaddleOCR#pp-ocr-series-model-listupdate-on-september-8th)
+
+```bash
 # 下载并解压 OCR 文本检测模型
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_infer.tar && tar xf ch_ppocr_mobile_v2.0_det_infer.tar
+wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar -O ch_PP-OCRv2_det_infer.tar && tar -xf ch_PP-OCRv2_det_infer.tar
 # 下载并解压 OCR 文本识别模型
-wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_infer.tar && tar xf ch_ppocr_mobile_v2.0_rec_infer.tar
+wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar -O ch_PP-OCRv2_rec_infer.tar && tar -xf ch_PP-OCRv2_rec_infer.tar
 ```

 接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。

-```
+```bash
 # 转换检测模型
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_det_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_det_infer/ \
                                          --model_filename inference.pdmodel          \
                                          --params_filename inference.pdiparams       \
-                                         --serving_server ./ppocr_det_mobile_2.0_serving/ \
-                                         --serving_client ./ppocr_det_mobile_2.0_client/
+                                         --serving_server ./ppocrv2_det_serving/ \
+                                         --serving_client ./ppocrv2_det_client/
 # 转换识别模型
-python3 -m paddle_serving_client.convert --dirname ./ch_ppocr_mobile_v2.0_rec_infer/ \
+python3 -m paddle_serving_client.convert --dirname ./ch_PP-OCRv2_rec_infer/ \
                                          --model_filename inference.pdmodel          \
                                          --params_filename inference.pdiparams       \
-                                         --serving_server ./ppocr_rec_mobile_2.0_serving/ \
-                                         --serving_client ./ppocr_rec_mobile_2.0_client/
+                                         --serving_server ./ppocrv2_rec_serving/ \
+                                         --serving_client ./ppocrv2_rec_client/
 ```

-检测模型转换完成后,会在当前文件夹多出`ppocr_det_mobile_2.0_serving` 和`ppocr_det_mobile_2.0_client`的文件夹,具备如下格式:
+检测模型转换完成后,会在当前文件夹多出`ppocrv2_det_serving` 和`ppocrv2_det_client`的文件夹,具备如下格式:
 ```
-|- ppocr_det_mobile_2.0_serving/
+|- ppocrv2_det_serving/
   |- __model__
   |- __params__
   |- serving_server_conf.prototxt
   |- serving_server_conf.stream.prototxt

-|- ppocr_det_mobile_2.0_client
+|- ppocrv2_det_client
   |- serving_client_conf.prototxt
   |- serving_client_conf.stream.prototxt
diff --git a/deploy/pdserving/config.yml b/deploy/pdserving/config.yml
index 2aae922dfa12f46d1c0ebd352e8d3a7077065cf8..f3b0f7ec5a47bb9c513ab3d75f7d2d4138f88c4a 100644
--- a/deploy/pdserving/config.yml
+++ b/deploy/pdserving/config.yml
@@ -34,7 +34,7 @@ op:
         client_type: local_predictor

         #det模型路径
-        model_config: ./ppocr_det_mobile_2.0_serving
+        model_config: ./ppocrv2_det_serving

         #Fetch结果列表,以client_config中fetch_var的alias_name为准
         fetch_list: ["save_infer_model/scale_0.tmp_1"]
@@ -60,7 +60,7 @@ op:
         client_type: local_predictor

         #rec模型路径
-        model_config: ./ppocr_rec_mobile_2.0_serving
+        model_config: ./ppocrv2_rec_serving

         #Fetch结果列表,以client_config中fetch_var的alias_name为准
         fetch_list: ["save_infer_model/scale_0.tmp_1"]
diff --git a/deploy/pdserving/web_service.py b/deploy/pdserving/web_service.py
index 21db1e1411a8706dbbd9a22ce2ce7db8e16da5ec..b97c6e1f564a61bb9792542b9e9f1e88d782e80d 100644
--- a/deploy/pdserving/web_service.py
+++ b/deploy/pdserving/web_service.py
@@ -54,7 +54,7 @@ class DetOp(Op):
         _, self.new_h, self.new_w = det_img.shape
         return {"x": det_img[np.newaxis, :].copy()}, False, None, ""

-    def postprocess(self, input_dicts, fetch_dict, log_id):
+    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
         det_out = fetch_dict["save_infer_model/scale_0.tmp_1"]
         ratio_list = [
             float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
@@ -129,7 +129,7 @@ class RecOp(Op):

         return feed_list, False, None, ""

-    def postprocess(self, input_dicts, fetch_data, log_id):
+    def postprocess(self, input_dicts, fetch_data, data_id, log_id):
         res_list = []
         if isinstance(fetch_data, dict):
             if len(fetch_data) > 0:
diff --git a/deploy/pdserving/web_service_det.py b/deploy/pdserving/web_service_det.py
index 25ac2f37dbd3cdf05b3503abaab0c5651867fae9..ee39388425763d789ada76cf0a9db9f812fe8d2a 100644
--- a/deploy/pdserving/web_service_det.py
+++ b/deploy/pdserving/web_service_det.py
@@ -54,7 +54,7 @@ class DetOp(Op):
         _, self.new_h, self.new_w = det_img.shape
         return {"x": det_img[np.newaxis, :].copy()}, False, None, ""

-    def postprocess(self, input_dicts, fetch_dict, log_id):
+    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
         det_out = fetch_dict["save_infer_model/scale_0.tmp_1"]
         ratio_list = [
             float(self.new_h) / self.ori_h, float(self.new_w) / self.ori_w
diff --git a/deploy/pdserving/web_service_rec.py b/deploy/pdserving/web_service_rec.py
index 6b3cf707f0f19034a0734fd27824feb4fb6cce20..f5cd8bf053c604786fecb9b71749b3c98f2552a2 100644
--- a/deploy/pdserving/web_service_rec.py
+++ b/deploy/pdserving/web_service_rec.py
@@ -56,7 +56,7 @@ class RecOp(Op):
         feed_list.append(feed)
         return feed_list, False, None, ""

-    def postprocess(self, input_dicts, fetch_data, log_id):
+    def postprocess(self, input_dicts, fetch_data, data_id, log_id):
         res_list = []
         if isinstance(fetch_data, dict):
             if len(fetch_data) > 0:
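The `web_service*.py` changes above track an interface change in the Paddle Serving pipeline framework: as of 0.7.0 the framework passes a `data_id` argument to `Op.postprocess` in addition to `log_id`, so every overridden `postprocess` must accept both. A minimal sketch of the updated signature, assuming the same import path that `deploy/pdserving/web_service.py` uses; `DemoOp` and its pass-through body are illustrative, not part of this patch:

```python
# Minimal sketch of the Op.postprocess signature required by Paddle Serving 0.7.0.
# DemoOp is a hypothetical example, not part of this patch; the import path
# matches the one used by deploy/pdserving/web_service.py.
from paddle_serving_server.web_service import Op


class DemoOp(Op):
    def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
        # data_id is the framework-assigned id of the request inside the
        # pipeline; log_id is the caller-supplied logging id. Both must be
        # accepted even when unused, otherwise the framework's call fails
        # with a TypeError. The three-value return (result dict, error code,
        # error message) mirrors the Ops in web_service.py.
        return fetch_dict, None, ""
```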