Commit 0f908e75 authored by: S stephon

update paddleserving

Parent: c8e387f5
`deploy/paddleserving/recognition/config.yml`:

```diff
@@ -18,7 +18,7 @@ op:
     local_service_conf:
       # uci model path
-      model_config: ../../models/product_ResNet50_vd_aliproduct_v1.0_serving
+      model_config: ../../models/general_PPLCNet_x2_5_lite_v1.0_serving
       # compute hardware type: when empty, decided by devices (CPU/GPU); 0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu
       device_type: 1
@@ -40,4 +40,4 @@ op:
       devices: '0'
       fetch_list:
       - save_infer_model/scale_0.tmp_1
-      model_config: ../../models/ppyolov2_r50vd_dcn_mainbody_v1.0_serving/
+      model_config: ../../models/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/
\ No newline at end of file
```
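After changing these paths, it is worth checking that both `model_config` directories actually exist relative to the config file. A quick sanity-check sketch (assuming PyYAML is installed and that the two ops are named `det` and `rec`, matching the service code further down):

```python
# check_config.py -- verify the model paths referenced by config.yml
import os

import yaml  # pip install pyyaml

with open("config.yml") as f:
    conf = yaml.safe_load(f)

# assumes the op names "det" and "rec", as registered by the web service
for name in ["det", "rec"]:
    path = conf["op"][name]["local_service_conf"]["model_config"]
    status = "ok" if os.path.isdir(path) else "MISSING"
    print(f"{name}: {path} [{status}]")
```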
`deploy/paddleserving/recognition/pipeline_http_client.py`:

```diff
@@ -3,11 +3,13 @@ import json
 import base64
 import os

-imgpath = "daoxiangcunjinzhubing_6.jpg"
+imgpath = "../../drink_dataset_v1.0/test_images/001.jpeg"

 def cv2_to_base64(image):
     return base64.b64encode(image).decode('utf8')

 if __name__ == "__main__":
     url = "http://127.0.0.1:18081/recognition/prediction"
```
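The request logic of the client is collapsed in this view. For reference, a pipeline HTTP client typically base64-encodes the image and POSTs a `{"key": [...], "value": [...]}` JSON payload, roughly like this sketch:

```python
# sketch of the collapsed request logic, assuming the usual
# Paddle Serving pipeline JSON format: {"key": [...], "value": [...]}
import base64
import json

import requests

url = "http://127.0.0.1:18081/recognition/prediction"
imgpath = "../../drink_dataset_v1.0/test_images/001.jpeg"

with open(imgpath, "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf8")

data = {"key": ["image"], "value": [image_b64]}
resp = requests.post(url=url, data=json.dumps(data))
print(resp.json())  # detection boxes plus retrieved labels on success
```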
`deploy/paddleserving/recognition/recognition_web_service.py`:

```diff
@@ -23,6 +23,7 @@ import faiss
 import pickle
 import json

+
 class DetOp(Op):
     def init_op(self):
         self.img_preprocess = Sequential([
@@ -65,22 +66,28 @@ class DetOp(Op):
             imgs.append({
                 "image": im[np.newaxis, :],
-                "im_shape": np.array(list(im.shape[1:])).reshape(-1)[np.newaxis,:],
-                "scale_factor": np.array([im_scale_y, im_scale_x]).astype('float32'),
+                "im_shape":
+                np.array(list(im.shape[1:])).reshape(-1)[np.newaxis, :],
+                "scale_factor":
+                np.array([im_scale_y, im_scale_x]).astype('float32'),
             })
         self.raw_img = raw_imgs
         feed_dict = {
-            "image": np.concatenate([x["image"] for x in imgs], axis=0),
-            "im_shape": np.concatenate([x["im_shape"] for x in imgs], axis=0),
-            "scale_factor": np.concatenate([x["scale_factor"] for x in imgs], axis=0)
+            "image": np.concatenate(
+                [x["image"] for x in imgs], axis=0),
+            "im_shape": np.concatenate(
+                [x["im_shape"] for x in imgs], axis=0),
+            "scale_factor": np.concatenate(
+                [x["scale_factor"] for x in imgs], axis=0)
         }
         return feed_dict, False, None, ""

     def postprocess(self, input_dicts, fetch_dict, log_id):
         boxes = self.img_postprocess(fetch_dict, visualize=False)
-        boxes.sort(key = lambda x: x["score"], reverse = True)
-        boxes = filter(lambda x: x["score"] >= self.threshold, boxes[:self.max_det_results])
+        boxes.sort(key=lambda x: x["score"], reverse=True)
+        boxes = filter(lambda x: x["score"] >= self.threshold,
+                       boxes[:self.max_det_results])
         boxes = list(boxes)
         for i in range(len(boxes)):
             boxes[i]["bbox"][2] += boxes[i]["bbox"][0] - 1
```
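The in-place additions at the end of `postprocess` convert detector output from `[x, y, w, h]` to inclusive corner form `[x1, y1, x2, y2]` (the matching line for `bbox[3]` falls just outside this hunk). A small illustrative sketch of the same arithmetic:

```python
# illustrative only: COCO-style [x, y, w, h] -> inclusive [x1, y1, x2, y2],
# mirroring the in-place "+=" lines in DetOp.postprocess
def xywh_to_xyxy(bbox):
    x, y, w, h = bbox
    return [x, y, x + w - 1, y + h - 1]


assert xywh_to_xyxy([10, 20, 100, 50]) == [10, 20, 109, 69]
```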
`recognition_web_service.py` (continued):

```diff
@@ -89,15 +96,16 @@ class DetOp(Op):
         res_dict = {"bbox_result": result, "image": self.raw_img}
         return res_dict, None, ""

+
 class RecOp(Op):
     def init_op(self):
         self.seq = Sequential([
-            BGR2RGB(), Resize((224, 224)),
-            Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
-                                False), Transpose((2, 0, 1))
+            BGR2RGB(), Resize((224, 224)), Div(255),
+            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225],
+                      False), Transpose((2, 0, 1))
         ])

-        index_dir = "../../recognition_demo_data_v1.1/gallery_product/index"
+        index_dir = "../../drink_dataset_v1.0/index"
         assert os.path.exists(os.path.join(
             index_dir, "vector.index")), "vector.index not found ..."
         assert os.path.exists(os.path.join(
@@ -121,7 +129,8 @@ class RecOp(Op):
         origin_img = cv2.imdecode(data, cv2.IMREAD_COLOR)
         dt_boxes = input_dict["bbox_result"]
         boxes = json.loads(dt_boxes)
-        boxes.append({"category_id": 0,
-                      "score": 1.0,
-                      "bbox": [0, 0, origin_img.shape[1], origin_img.shape[0]]
-                      })
+        boxes.append({
+            "category_id": 0,
+            "score": 1.0,
+            "bbox": [0, 0, origin_img.shape[1], origin_img.shape[0]]
+        })
@@ -131,14 +140,14 @@ class RecOp(Op):
         imgs = []
         for box in boxes:
             box = [int(x) for x in box["bbox"]]
-            im = origin_img[box[1]: box[3], box[0]: box[2]].copy()
+            im = origin_img[box[1]:box[3], box[0]:box[2]].copy()
             img = self.seq(im)
             imgs.append(img[np.newaxis, :].copy())
         input_imgs = np.concatenate(imgs, axis=0)
         return {"x": input_imgs}, False, None, ""

-    def nms_to_rec_results(self, results, thresh = 0.1):
+    def nms_to_rec_results(self, results, thresh=0.1):
         filtered_results = []
         x1 = np.array([r["bbox"][0] for r in results]).astype("float32")
         y1 = np.array([r["bbox"][1] for r in results]).astype("float32")
@@ -187,12 +196,14 @@ class RecOp(Op):
         results = self.nms_to_rec_results(results, self.rec_nms_thresold)
         return {"result": str(results)}, None, ""

+
 class RecognitionService(WebService):
     def get_pipeline_response(self, read_op):
         det_op = DetOp(name="det", input_ops=[read_op])
         rec_op = RecOp(name="rec", input_ops=[det_op])
         return rec_op


 product_recog_service = RecognitionService(name="recognition")
 product_recog_service.prepare_pipeline_config("config.yml")
 product_recog_service.run_service()
```
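The retrieval step between feature extraction and the final `nms_to_rec_results` call is collapsed in this view. Conceptually, `postprocess` searches the faiss index with the batch of features and maps the returned ids back to labels before thresholding. A rough sketch under that assumption (the `id_map.pkl` filename, `return_k`, and `score_thres` are illustrative, not taken from this diff):

```python
# illustrative sketch of the collapsed retrieval logic in RecOp.postprocess;
# index loading mirrors init_op, the remaining names are assumptions
import os
import pickle

import faiss

index_dir = "../../drink_dataset_v1.0/index"
searcher = faiss.read_index(os.path.join(index_dir, "vector.index"))
with open(os.path.join(index_dir, "id_map.pkl"), "rb") as f:
    id_map = pickle.load(f)


def retrieve(features, return_k=5, score_thres=0.5):
    # features: float32 array of shape [num_boxes, embedding_dim]
    scores, docs = searcher.search(features, return_k)
    results = []
    for i in range(features.shape[0]):
        if scores[i][0] >= score_thres:
            results.append({
                "rec_docs": id_map[docs[i][0]],
                "rec_scores": float(scores[i][0]),
            })
    return results
```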
# Model Service Deployment

- [Introduction](#introduction)
- [Serving Installation](#serving-installation)
- [Image Classification Service Deployment](#image-classification-service-deployment)
- [Image Recognition Service Deployment](#image-recognition-service-deployment)
- [FAQ](#faq)
<a name="简介"></a>
## 1. 简介 ## 1. 简介
[Paddle Serving](https://github.com/PaddlePaddle/Serving) 旨在帮助深度学习开发者轻松部署在线预测服务,支持一键部署工业级的服务能力、客户端和服务端之间高并发和高效通信、并支持多种编程语言开发客户端。 [Paddle Serving](https://github.com/PaddlePaddle/Serving) 旨在帮助深度学习开发者轻松部署在线预测服务,支持一键部署工业级的服务能力、客户端和服务端之间高并发和高效通信、并支持多种编程语言开发客户端。
该部分以 HTTP 预测服务部署为例,介绍怎样在 PaddleClas 中使用 PaddleServing 部署模型服务。 该部分以 HTTP 预测服务部署为例,介绍怎样在 PaddleClas 中使用 PaddleServing 部署模型服务。
<a name="Serving安装"></a>
## 2. Serving安装 ## 2. Serving安装
Serving 官网推荐使用 docker 安装并部署 Serving 环境。首先需要拉取 docker 环境并创建基于 Serving 的 docker。 Serving 官网推荐使用 docker 安装并部署 Serving 环境。首先需要拉取 docker 环境并创建基于 Serving 的 docker。
...

```shell
pip install paddlepaddle-gpu
pip install paddle-serving-client
pip install paddle-serving-server-gpu
pip install paddle-serving-app
```

* If the installation is too slow, you can switch the package source with `-i https://pypi.tuna.tsinghua.edu.cn/simple` to speed it up.
To deploy a CPU service instead, install the CPU version of serving-server:

```shell
pip install paddle-serving-server
```
<a name="图像分类服务部署"></a>
## 3. 导出模型 ## 3. 图像分类服务部署
### 3.1 模型转换
使用 `tools/export_serving_model.py` 脚本导出 Serving 模型,以 `ResNet50_vd` 为例,使用方法如下 使用PaddleServing做服务化部署时,需要将保存的inference模型转换为Serving模型。下面以经典的ResNet50_vd模型为例,介绍如何部署图像分类服务
- Enter the working directory:
```shell
cd deploy/paddleserving
```
- Download the ResNet50_vd inference model:
```shell
# download and untar the ResNet50_vd model
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/inference/ResNet50_vd_infer.tar && tar xf ResNet50_vd_infer.tar
```
- Use paddle_serving_client to convert the downloaded inference model into a format that is easy to deploy on the server:
```
# convert the ResNet50_vd model
python3 -m paddle_serving_client.convert --dirname ./ResNet50_vd_infer/ \
                                         --model_filename inference.pdmodel \
                                         --params_filename inference.pdiparams \
                                         --serving_server ./ResNet50_vd_serving/ \
                                         --serving_client ./ResNet50_vd_client/
```
After the ResNet50_vd inference model is converted, two extra folders, `ResNet50_vd_serving` and `ResNet50_vd_client`, appear in the current directory, laid out as follows:
```
|- ResNet50_vd_serving/
  |- __model__
  |- __params__
  |- serving_server_conf.prototxt
  |- serving_server_conf.stream.prototxt
|- ResNet50_vd_client/
  |- serving_client_conf.prototxt
  |- serving_client_conf.stream.prototxt
```
Once the model files are ready, modify the alias names in serving_server_conf.prototxt: change the `alias_name` in `feed_var` to `image`, and the `alias_name` in `fetch_var` to `prediction`.

**Note**: To stay compatible with different models, Serving provides input/output renaming. When deploying a different model, you only need to adjust the `alias_name` in the configuration file; the inference deployment then works without any code changes.

The modified serving_server_conf.prototxt looks like this:
```
feed_var {
name: "inputs"
alias_name: "image"
is_lod_tensor: false
feed_type: 1
shape: 3
shape: 224
shape: 224
}
fetch_var {
name: "save_infer_model/scale_0.tmp_1"
alias_name: "prediction"
is_lod_tensor: true
fetch_type: 1
shape: -1
}
```
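The two alias edits can also be scripted instead of applied by hand. A minimal sketch, assuming the prototxt layout shown above with exactly one `feed_var` and one `fetch_var` (plain text substitution, not an official Serving tool):

```python
# patch_alias.py -- rewrite the alias_name fields in serving_server_conf.prototxt
import re

path = "ResNet50_vd_serving/serving_server_conf.prototxt"
with open(path) as f:
    conf = f.read()

# the first alias_name belongs to feed_var, the second to fetch_var
aliases = iter(["image", "prediction"])
conf = re.sub(r'alias_name: "[^"]+"',
              lambda m: 'alias_name: "{}"'.format(next(aliases)), conf)

with open(path, "w") as f:
    f.write(conf)
```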
### 3.2 Service Deployment and Requests

The paddleserving directory contains the code for starting the pipeline service and sending prediction requests, including:
```shell
__init__.py
config.yml                    # configuration file for starting the service
pipeline_http_client.py       # script for sending pipeline prediction requests over HTTP
pipeline_rpc_client.py        # script for sending pipeline prediction requests over RPC
classification_web_service.py # script for starting the pipeline server
```
- Start the service:
```shell
# start the service; the runtime log is saved in log.txt
python3 classification_web_service.py &>log.txt &
```
After the service starts successfully, a log similar to the following is printed in log.txt:
![](../../../deploy/paddleserving/imgs/start_server.png)
- Send a request:
```shell
# send a service request
python3 pipeline_http_client.py
```
After it runs successfully, the model's prediction is printed in the terminal; for example:
![](../../../deploy/paddleserving/imgs/results.png)
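`pipeline_rpc_client.py` itself is not shown in this diff. For reference, a minimal RPC client might look like the sketch below; the port (9993) and the fetch keys are placeholders that must match config.yml and the service:

```python
# illustrative RPC client; port and fetch names must match config.yml
import base64

from paddle_serving_server.pipeline import PipelineClient

client = PipelineClient()
client.connect(["127.0.0.1:9993"])

with open("daisy.jpg", "rb") as f:  # any local test image
    image = base64.b64encode(f.read()).decode("utf8")

ret = client.predict(feed_dict={"image": image}, fetch=["label", "prob"])
print(ret)
```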
<a name="图像识别服务部署"></a>
## 4.图像识别服务部署
使用PaddleServing做服务化部署时,需要将保存的inference模型转换为Serving模型。 下面以PP-ShiTu中的超轻量图像识别模型为例,介绍图像识别服务的部署。
## 4.1 模型转换
- Download the general detection inference model and the general recognition inference model:
```
cd deploy
# download and untar the general recognition model
wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/general_PPLCNet_x2_5_lite_v1.0_infer.tar
cd models
tar -xf general_PPLCNet_x2_5_lite_v1.0_infer.tar
# download and untar the general detection model
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
tar -xf picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer.tar
```
- Convert the recognition inference model into a Serving model:
```
# convert the recognition model
python3 -m paddle_serving_client.convert --dirname ./general_PPLCNet_x2_5_lite_v1.0_infer/ \
                                         --model_filename inference.pdmodel \
                                         --params_filename inference.pdiparams \
                                         --serving_server ./general_PPLCNet_x2_5_lite_v1.0_serving/ \
                                         --serving_client ./general_PPLCNet_x2_5_lite_v1.0_client/
```
After the recognition inference model is converted, two extra folders, `general_PPLCNet_x2_5_lite_v1.0_serving/` and `general_PPLCNet_x2_5_lite_v1.0_client/`, appear in the current directory. Modify the alias name in serving_server_conf.prototxt under the `general_PPLCNet_x2_5_lite_v1.0_serving/` directory: change the `alias_name` in `fetch_var` to `features`.

The modified serving_server_conf.prototxt is as follows:
```
feed_var {
name: "x"
alias_name: "x"
is_lod_tensor: false
feed_type: 1
shape: 3
shape: 224
shape: 224
}
fetch_var {
name: "save_infer_model/scale_0.tmp_1"
alias_name: "features"
is_lod_tensor: true
fetch_type: 1
shape: -1
}
```
- Convert the general detection inference model into a Serving model:
```
# convert the general detection model
python3 -m paddle_serving_client.convert --dirname ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_infer/ \
                                         --model_filename inference.pdmodel \
                                         --params_filename inference.pdiparams \
                                         --serving_server ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/ \
                                         --serving_client ./picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/
```
After the detection inference model is converted, two extra folders, `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` and `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_client/`, appear in the current directory.
**Note:** There is no need to modify the alias names in serving_server_conf.prototxt under the `picodet_PPLCNet_x2_5_mainbody_lite_v1.0_serving/` directory.

- Download and untar the pre-built retrieval index:
```
cd ../
wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/drink_dataset_v1.0.tar && tar -xf drink_dataset_v1.0.tar
```
### 4.2 Service Deployment and Requests

**Note:** The recognition service involves multiple models, so the Pipeline deployment mode is used for performance reasons. Pipeline deployment currently does not support the Windows platform.

- Enter the working directory:

```shell
cd ./deploy/paddleserving/recognition
```
The paddleserving directory contains the code for starting the pipeline service and sending prediction requests, including:
```
__init__.py
config.yml                 # configuration file for starting the service
pipeline_http_client.py    # script for sending pipeline prediction requests over HTTP
pipeline_rpc_client.py     # script for sending pipeline prediction requests over RPC
recognition_web_service.py # script for starting the pipeline server
```
- Start the service:
```
# start the service; the runtime log is saved in log.txt
python3 recognition_web_service.py &>log.txt &
```
After the service starts successfully, a log similar to the following is printed in log.txt:
![](../../../deploy/paddleserving/imgs/start_server_shitu.png)
- Send a request:
```
python3 pipeline_http_client.py
```
After it runs successfully, the model's prediction is printed in the terminal; for example:
![](../../../deploy/paddleserving/imgs/results_shitu.png)
<a name="faq"></a>
## 5. FAQ

**Q1**: No result is returned after a request is sent, or an output decoding error is reported.

**A1**: Do not set proxies when starting the service or sending requests. You can turn the proxy off before starting the service and before sending requests with:
```
unset https_proxy
unset http_proxy
```
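If clearing the environment variables is inconvenient, the proxy can also be bypassed on the client side for a single call; for example, the `requests` library accepts a `proxies` argument that disables proxying per request:

```python
# send the request while ignoring any system-wide proxy settings
import json

import requests

url = "http://127.0.0.1:18081/recognition/prediction"
data = {"key": ["image"], "value": ["<base64-encoded image>"]}
resp = requests.post(url, data=json.dumps(data),
                     proxies={"http": None, "https": None})
print(resp.text)
```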
For more service deployment types, such as `RPC prediction services`, you can refer to the [Serving examples on GitHub](https://github.com/PaddlePaddle/Serving/tree/develop/python/examples/imagenet).