diff --git a/deploy/paddleserving/README.md b/deploy/paddleserving/README.md index 75eb3e35b8ffa03bc6ae69db42fffb33bdccaf14..bb34b12989a56944bb5a3b890dc122cd4beba24f 100644 --- a/deploy/paddleserving/README.md +++ b/deploy/paddleserving/README.md @@ -4,9 +4,9 @@ PaddleClas provides two service deployment methods: - Based on **PaddleHub Serving**: Code path is "`./deploy/hubserving`". Please refer to the [tutorial](../../deploy/hubserving/readme_en.md) -- Based on **PaddleServing**: Code path is "`./deploy/paddleserving`". Please follow this tutorial. +- Based on **PaddleServing**: Code path is "`./deploy/paddleserving`". If you prefer the retrieval-based image recognition service, please refer to this [tutorial](./recognition/README.md); if you'd like the image classification service, please follow this tutorial. -# Service deployment based on PaddleServing +# Image Classification Service deployment based on PaddleServing This document will introduce how to use the [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md) to deploy the ResNet50_vd model as a pipeline online service. @@ -131,7 +131,7 @@ fetch_var { config.yml # configuration file of starting the service pipeline_http_client.py # script to send pipeline prediction request by http pipeline_rpc_client.py # script to send pipeline prediction request by rpc - resnet50_web_service.py # start the script of the pipeline server + classification_web_service.py # start the script of the pipeline server ``` 2. Run the following command to start the service. @@ -147,7 +147,7 @@ fetch_var { python3 pipeline_http_client.py ``` After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is: - ![](./imgs/results.png) + ![](./imgs/results.png) Adjust the number of concurrency in config.yml to get the largest QPS. 
diff --git a/deploy/paddleserving/README_CN.md b/deploy/paddleserving/README_CN.md index 3394ae5b5a75c774858fb50e429d083f8a19fc07..02ee2093d901251a20cdf67261b0fb882d2736fd 100644 --- a/deploy/paddleserving/README_CN.md +++ b/deploy/paddleserving/README_CN.md @@ -4,9 +4,9 @@ PaddleClas提供2种服务部署方式: - 基于PaddleHub Serving的部署:代码路径为"`./deploy/hubserving`",使用方法参考[文档](../../deploy/hubserving/readme.md); -- 基于PaddleServing的部署:代码路径为"`./deploy/paddleserving`",按照本教程使用。 +- 基于PaddleServing的部署:代码路径为"`./deploy/paddleserving`", 基于检索方式的图像识别服务参考[文档](./recognition/README_CN.md), 图像分类服务按照本教程使用。 -# 基于PaddleServing的服务部署 +# 基于PaddleServing的图像分类服务部署 本文档以经典的ResNet50_vd模型为例,介绍如何使用[PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)工具部署PaddleClas 动态图模型的pipeline在线服务。 @@ -127,7 +127,7 @@ fetch_var { config.yml # 启动服务的配置文件 pipeline_http_client.py # http方式发送pipeline预测请求的脚本 pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 - resnet50_web_service.py # 启动pipeline服务端的脚本 + classification_web_service.py # 启动pipeline服务端的脚本 ``` 2. 
启动服务可运行如下命令: diff --git a/deploy/paddleserving/imgs/results_recog.png b/deploy/paddleserving/imgs/results_recog.png new file mode 100644 index 0000000000000000000000000000000000000000..37393d5d64e84de469d78dcc9fad88aa771f57f8 Binary files /dev/null and b/deploy/paddleserving/imgs/results_recog.png differ diff --git a/deploy/paddleserving/imgs/start_server_recog.png b/deploy/paddleserving/imgs/start_server_recog.png new file mode 100644 index 0000000000000000000000000000000000000000..d4344a1e6bdab7ccc4c3c31bc16d1e3186b9b806 Binary files /dev/null and b/deploy/paddleserving/imgs/start_server_recog.png differ diff --git a/deploy/paddleserving/recognition/README.md b/deploy/paddleserving/recognition/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0ece4fbd469840b6f2d29f455cdc7b0dc826739e --- /dev/null +++ b/deploy/paddleserving/recognition/README.md @@ -0,0 +1,176 @@ +# Product Recognition Service deployment based on PaddleServing + +(English|[简体中文](./README_CN.md)) + +This document will introduce how to use the [PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README.md) to deploy the product recognition model based on retrieval method as a pipeline online service. + +Some Key Features of Paddle Serving: +- Integrate with Paddle training pipeline seamlessly, most paddle models can be deployed with one line command. +- Industrial serving features supported, such as models management, online loading, online A/B testing etc. +- Highly concurrent and efficient communication between clients and servers supported. + +The introduction and tutorial of Paddle Serving service deployment framework reference [document](https://github.com/PaddlePaddle/Serving/blob/develop/README.md). 
+ +## Contents +- [Environmental preparation](#environmental-preparation) +- [Model conversion](#model-conversion) +- [Paddle Serving pipeline deployment](#paddle-serving-pipeline-deployment) +- [FAQ](#faq) + + +## Environmental preparation + +PaddleClas operating environment and PaddleServing operating environment are needed. + +1. Please prepare PaddleClas operating environment reference [link](../../docs/zh_CN/tutorials/install.md). + Download the corresponding paddle whl package according to the environment, it is recommended to install version 2.1.0. + +2. The steps of PaddleServing operating environment prepare are as follows: + + Install serving which used to start the service + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # Other GPU environments need to confirm the environment and then choose to execute the following commands + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +3. Install the client to send requests to the service + In [download link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md) find the client installation package corresponding to the python version. + The python3.7 version is recommended here: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +4. Install serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + + **note:** If you want to install the latest version of PaddleServing, refer to [link](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md). + + + +## Model conversion +When using PaddleServing for service deployment, you need to convert the saved inference model into a serving model that is easy to deploy. 
+The following assumes that the current working directory is the PaddleClas root directory + +Firstly, download the inference model of the product recognition model +``` +cd deploy +# Download and unzip the product recognition model +wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/product_ResNet50_vd_aliproduct_v1.0_infer.tar +cd models +tar -xf product_ResNet50_vd_aliproduct_v1.0_infer.tar +``` + +Then, you can use the installed paddle_serving_client tool to convert the inference model to a model format that is easy to deploy on the server. +``` +# Product recognition model conversion +python3 -m paddle_serving_client.convert --dirname ./product_ResNet50_vd_aliproduct_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./product_ResNet50_vd_aliproduct_v1.0_serving/ \ + --serving_client ./product_ResNet50_vd_aliproduct_v1.0_client/ +``` + +After the ResNet50_vd inference model is converted, there will be additional folders of `product_ResNet50_vd_aliproduct_v1.0_serving` and `product_ResNet50_vd_aliproduct_v1.0_client` in the current folder, with the following format: +``` +|- product_ResNet50_vd_aliproduct_v1.0_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- product_ResNet50_vd_aliproduct_v1.0_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt +``` + +Once you have the model file for deployment, you need to change the alias name in `serving_server_conf.prototxt`: change `alias_name` in `fetch_var` to `features`, +The modified serving_server_conf.prototxt file is as follows: +``` +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + +Next, download and unpack the built index of product gallery +``` +cd ../ +wget 
https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar && tar -xf recognition_demo_data_v1.1.tar +``` + + + +## Paddle Serving pipeline deployment + +1. Download the PaddleClas code, if you have already downloaded it, you can skip this step. + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # Enter the working directory + cd PaddleClas/deploy/paddleserving/recognition + ``` + + The paddleserving directory contains the code to start the pipeline service and send prediction requests, including: + ``` + __init__.py + config.yml # configuration file of starting the service + pipeline_http_client.py # script to send pipeline prediction request by http + pipeline_rpc_client.py # script to send pipeline prediction request by rpc + recognition_web_service.py # start the script of the pipeline server + ``` + +2. Run the following command to start the service. + ``` + # Start the service and save the running log in log.txt + python3 recognition_web_service.py &>log.txt & + ``` + After the service is successfully started, a log similar to the following will be printed in log.txt + ![](../imgs/start_server_recog.png) + +3. Send service request + ``` + python3 pipeline_http_client.py + ``` + After successfully running, the predicted result of the model will be printed in the cmd window. An example of the result is: + ![](../imgs/results_recog.png) + + Adjust the number of concurrency in config.yml to get the largest QPS. + + ``` + op: + concurrency: 8 + ... + ``` + + Multiple service requests can be sent at the same time if necessary. + + The predicted performance data will be automatically written into the `PipelineServingLogs/pipeline.tracer` file. + + +## FAQ +**Q1**: No result return after sending the request. + +**A1**: Do not set the proxy when starting the service and sending the request. You can close the proxy before starting the service and before sending the request. 
The command to close the proxy is: +``` +unset https_proxy +unset http_proxy +``` diff --git a/deploy/paddleserving/recognition/README_CN.md b/deploy/paddleserving/recognition/README_CN.md new file mode 100644 index 0000000000000000000000000000000000000000..58efd6ba7f283fa183fed49a99d84d154cace969 --- /dev/null +++ b/deploy/paddleserving/recognition/README_CN.md @@ -0,0 +1,172 @@ +# 基于PaddleServing的商品识别服务部署 + +([English](./README.md)|简体中文) + +本文以商品识别为例,介绍如何使用[PaddleServing](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)工具部署PaddleClas动态图模型的pipeline在线服务。 + +相比较于hubserving部署,PaddleServing具备以下优点: +- 支持客户端和服务端之间高并发和高效通信 +- 支持 工业级的服务能力 例如模型管理,在线加载,在线A/B测试等 +- 支持 多种编程语言 开发客户端,例如C++, Python和Java + +更多有关PaddleServing服务化部署框架介绍和使用教程参考[文档](https://github.com/PaddlePaddle/Serving/blob/develop/README_CN.md)。 + +## 目录 +- [环境准备](#环境准备) +- [模型转换](#模型转换) +- [Paddle Serving pipeline部署](#部署) +- [FAQ](#FAQ) + + +## 环境准备 + +需要准备PaddleClas的运行环境和PaddleServing的运行环境。 + +- 准备PaddleClas的[运行环境](../../docs/zh_CN/tutorials/install.md), 根据环境下载对应的paddle whl包,推荐安装2.1.0版本 + +- 准备PaddleServing的运行环境,步骤如下 + +1. 安装serving,用于启动服务 + ``` + pip3 install paddle-serving-server==0.6.1 # for CPU + pip3 install paddle-serving-server-gpu==0.6.1 # for GPU + # 其他GPU环境需要确认环境再选择执行如下命令 + pip3 install paddle-serving-server-gpu==0.6.1.post101 # GPU with CUDA10.1 + TensorRT6 + pip3 install paddle-serving-server-gpu==0.6.1.post11 # GPU with CUDA11 + TensorRT7 + ``` + +2. 安装client,用于向服务发送请求 + 在[下载链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)中找到对应python版本的client安装包,这里推荐python3.7版本: + + ``` + wget https://paddle-serving.bj.bcebos.com/test-dev/whl/paddle_serving_client-0.0.0-cp37-none-any.whl + pip3 install paddle_serving_client-0.0.0-cp37-none-any.whl + ``` + +3. 
安装serving-app + ``` + pip3 install paddle-serving-app==0.6.1 + ``` + **Note:** 如果要安装最新版本的PaddleServing参考[链接](https://github.com/PaddlePaddle/Serving/blob/develop/doc/LATEST_PACKAGES.md)。 + + +## 模型转换 + +使用PaddleServing做服务化部署时,需要将保存的inference模型转换为serving易于部署的模型。 +以下内容假定当前工作目录为PaddleClas根目录。 + +首先,下载商品识别的inference模型 +``` +cd deploy + +# 下载并解压商品识别模型 +wget -P models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/models/inference/product_ResNet50_vd_aliproduct_v1.0_infer.tar +cd models +tar -xf product_ResNet50_vd_aliproduct_v1.0_infer.tar +``` + +接下来,用安装的paddle_serving_client把下载的inference模型转换成易于server部署的模型格式。 + +``` +# 转换商品识别模型 +python3 -m paddle_serving_client.convert --dirname ./product_ResNet50_vd_aliproduct_v1.0_infer/ \ + --model_filename inference.pdmodel \ + --params_filename inference.pdiparams \ + --serving_server ./product_ResNet50_vd_aliproduct_v1.0_serving/ \ + --serving_client ./product_ResNet50_vd_aliproduct_v1.0_client/ +``` +商品识别推理模型转换完成后,会在当前文件夹多出`product_ResNet50_vd_aliproduct_v1.0_serving` 和`product_ResNet50_vd_aliproduct_v1.0_client`的文件夹,具备如下格式: +``` +|- product_ResNet50_vd_aliproduct_v1.0_serving/ + |- __model__ + |- __params__ + |- serving_server_conf.prototxt + |- serving_server_conf.stream.prototxt + +|- product_ResNet50_vd_aliproduct_v1.0_client + |- serving_client_conf.prototxt + |- serving_client_conf.stream.prototxt + +``` +得到模型文件之后,需要修改serving_server_conf.prototxt中的alias名字: 将`fetch_var`中的`alias_name`改为`features`, +修改后的serving_server_conf.prototxt内容如下: +``` +feed_var { + name: "x" + alias_name: "x" + is_lod_tensor: false + feed_type: 1 + shape: 3 + shape: 224 + shape: 224 +} +fetch_var { + name: "save_infer_model/scale_0.tmp_1" + alias_name: "features" + is_lod_tensor: true + fetch_type: 1 + shape: -1 +} +``` + +接下来,下载并解压已经构建后的商品库index +``` +cd ../ +wget https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/rec/data/recognition_demo_data_v1.1.tar && tar -xf recognition_demo_data_v1.1.tar +``` + + + +## Paddle Serving 
pipeline部署 + +1. 下载PaddleClas代码,若已下载可跳过此步骤 + ``` + git clone https://github.com/PaddlePaddle/PaddleClas + + # 进入到工作目录 + cd PaddleClas/deploy/paddleserving/recognition + ``` + paddleserving目录包含启动pipeline服务和发送预测请求的代码,包括: + ``` + __init__.py + config.yml # 启动服务的配置文件 + pipeline_http_client.py # http方式发送pipeline预测请求的脚本 + pipeline_rpc_client.py # rpc方式发送pipeline预测请求的脚本 + recognition_web_service.py # 启动pipeline服务端的脚本 + ``` + +2. 启动服务可运行如下命令: + ``` + # 启动服务,运行日志保存在log.txt + python3 recognition_web_service.py &>log.txt & + ``` + 成功启动服务后,log.txt中会打印类似如下日志 + ![](../imgs/start_server_recog.png) + +3. 发送服务请求: + ``` + python3 pipeline_http_client.py + ``` + 成功运行后,模型预测的结果会打印在cmd窗口中,结果示例为: + ![](../imgs/results_recog.png) + + 调整 config.yml 中的并发个数可以获得最大的QPS + ``` + op: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 8 + ... + ``` + 有需要的话可以同时发送多个服务请求 + + 预测性能数据会被自动写入 `PipelineServingLogs/pipeline.tracer` 文件中。 + + +## FAQ +**Q1**: 发送请求后没有结果返回或者提示输出解码报错 + +**A1**: 启动服务和发送请求时不要设置代理,可以在启动服务前和发送请求前关闭代理,关闭代理的命令是: +``` +unset https_proxy +unset http_proxy +``` diff --git a/deploy/paddleserving/recognition/__init__.py b/deploy/paddleserving/recognition/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/deploy/paddleserving/recognition/config.yml b/deploy/paddleserving/recognition/config.yml new file mode 100644 index 0000000000000000000000000000000000000000..9ccd0cfc3e93935d23a9084c33743962895c960c --- /dev/null +++ b/deploy/paddleserving/recognition/config.yml @@ -0,0 +1,33 @@ +#worker_num, 最大并发数。当build_dag_each_worker=True时, 框架会创建worker_num个进程,每个进程内构建grpcSever和DAG +##当build_dag_each_worker=False时,框架会设置主线程grpc线程池的max_workers=worker_num +worker_num: 1 + +#http端口, rpc_port和http_port不允许同时为空。当rpc_port可用且http_port为空时,不自动生成http_port +http_port: 18081 +rpc_port: 9994 + +dag: + #op资源类型, True, 为线程模型;False,为进程模型 + is_thread_op: False +op: + recog: + #并发数,is_thread_op=True时,为线程并发;否则为进程并发 + concurrency: 1 + + 
#当op配置没有server_endpoints时,从local_service_conf读取本地服务配置 + local_service_conf: + + #uci模型路径 + model_config: ../../models/product_ResNet50_vd_aliproduct_v1.0_serving + + #计算硬件类型: 空缺时由devices决定(CPU/GPU),0=cpu, 1=gpu, 2=tensorRT, 3=arm cpu, 4=kunlun xpu + device_type: 1 + + #计算硬件ID,当devices为""或不写时为CPU预测;当devices为"0", "0,1,2"时为GPU预测,表示使用的GPU卡 + devices: "0" # "0,1" + + #client类型,包括brpc, grpc和local_predictor.local_predictor不启动Serving服务,进程内预测 + client_type: local_predictor + + #Fetch结果列表,以client_config中fetch_var的alias_name为准 + fetch_list: ["features"] \ No newline at end of file diff --git a/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg b/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fc64a9531db0829d42b51e888361fa697afd080f Binary files /dev/null and b/deploy/paddleserving/recognition/daoxiangcunjinzhubing_6.jpg differ diff --git a/deploy/paddleserving/recognition/pipeline_http_client.py b/deploy/paddleserving/recognition/pipeline_http_client.py new file mode 100644 index 0000000000000000000000000000000000000000..8a9ffd536483a8f85d6564e17421bf1fcbed1dbf --- /dev/null +++ b/deploy/paddleserving/recognition/pipeline_http_client.py @@ -0,0 +1,21 @@ +import requests +import json +import base64 +import os + +imgpath = "daoxiangcunjinzhubing_6.jpg" + +def cv2_to_base64(image): + return base64.b64encode(image).decode('utf8') + +if __name__ == "__main__": + url = "http://127.0.0.1:18081/recog_service/prediction" + + with open(os.path.join(".", imgpath), 'rb') as file: + image_data1 = file.read() + image = cv2_to_base64(image_data1) + data = {"key": ["image"], "value": [image]} + + for i in range(5): + r = requests.post(url=url, data=json.dumps(data)) + print(r.json()) diff --git a/deploy/paddleserving/recognition/pipeline_rpc_client.py b/deploy/paddleserving/recognition/pipeline_rpc_client.py new file mode 100644 index 
0000000000000000000000000000000000000000..fa43cf432185531dca436ba631dfcbe2d4d72083 --- /dev/null +++ b/deploy/paddleserving/recognition/pipeline_rpc_client.py @@ -0,0 +1,34 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +try: + from paddle_serving_server_gpu.pipeline import PipelineClient +except ImportError: + from paddle_serving_server.pipeline import PipelineClient +import base64 + +client = PipelineClient() +client.connect(['127.0.0.1:9994']) +imgpath = "daoxiangcunjinzhubing_6.jpg" + +def cv2_to_base64(image): + return base64.b64encode(image).decode('utf8') + +if __name__ == "__main__": + with open(imgpath, 'rb') as file: + image_data = file.read() + image = cv2_to_base64(image_data) + + for i in range(1): + ret = client.predict(feed_dict={"image": image}, fetch=["label", "dist"]) + print(ret) diff --git a/deploy/paddleserving/recognition/recognition_web_service.py b/deploy/paddleserving/recognition/recognition_web_service.py new file mode 100644 index 0000000000000000000000000000000000000000..f8beddfbc25b9b45ac7f74ee77976e8dc22a8561 --- /dev/null +++ b/deploy/paddleserving/recognition/recognition_web_service.py @@ -0,0 +1,79 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import sys +from paddle_serving_app.reader import Sequential, URL2Image, Resize, CenterCrop, RGB2BGR, Transpose, Div, Normalize, Base64ToImage +try: + from paddle_serving_server_gpu.web_service import WebService, Op +except ImportError: + from paddle_serving_server.web_service import WebService, Op +import logging +import numpy as np +import base64, cv2 +import os +import faiss +import pickle + +class RecogOp(Op): + def init_op(self): + self.seq = Sequential([ + Resize(256), CenterCrop(224), RGB2BGR(), Transpose((2, 0, 1)), + Div(255), Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225], + True) + ]) + + #load index; and return top1 + index_dir = "../../recognition_demo_data_v1.1/gallery_product/index" + assert os.path.exists(os.path.join( + index_dir, "vector.index")), "vector.index not found ..." + assert os.path.exists(os.path.join( + index_dir, "id_map.pkl")), "id_map.pkl not found ... 
" + + self.Searcher = faiss.read_index( + os.path.join(index_dir, "vector.index")) + + with open(os.path.join(index_dir, "id_map.pkl"), "rb") as fd: + self.id_map = pickle.load(fd) + + def preprocess(self, input_dicts, data_id, log_id): + (_, input_dict), = input_dicts.items() + batch_size = len(input_dict.keys()) + imgs = [] + for key in input_dict.keys(): + data = base64.b64decode(input_dict[key].encode('utf8')) + data = np.fromstring(data, np.uint8) + im = cv2.imdecode(data, cv2.IMREAD_COLOR) + img = self.seq(im) + imgs.append(img[np.newaxis, :].copy()) + input_imgs = np.concatenate(imgs, axis=0) + return {"x": input_imgs}, False, None, "" + + def postprocess(self, input_dicts, fetch_dict, log_id): + score_list = fetch_dict["features"] + + return_top_k = 1 + scores, docs = self.Searcher.search(score_list, return_top_k) + + result = {} + result["label"] = self.id_map[docs[0][0]].split()[1] + result["dist"] = str(scores[0][0]) + return result, None, "" + +class ProductRecognitionService(WebService): + def get_pipeline_response(self, read_op): + image_op = RecogOp(name="recog", input_ops=[read_op]) + return image_op + +uci_service = ProductRecognitionService(name="recog_service") +uci_service.prepare_pipeline_config("config.yml") +uci_service.run_service()