diff --git a/python/examples/blazeface/README.md b/python/examples/blazeface/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..f569841ce4a3ae69b1ff16041f7fb7d4617177f7
--- /dev/null
+++ b/python/examples/blazeface/README.md
@@ -0,0 +1,23 @@
+# BlazeFace
+
+## Get Model
+```
+python -m paddle_serving_app.package --get_model blazeface
+tar -xzvf blazeface.tar.gz
+```
+
+## RPC Service
+
+### Start Service
+
+```
+python -m paddle_serving_server.serve --model serving_server --port 9494
+```
+
+### Client Prediction
+
+```
+python test_client.py serving_client/serving_client_conf.prototxt test.jpg
+```
+
+The results are saved in the `output` folder: a JSON file with the detections and an image file with the bounding boxes drawn on it.
diff --git a/python/examples/blazeface/test_client.py b/python/examples/blazeface/test_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..27eb185ea90ce72641cef44d9066c46945ad2629
--- /dev/null
+++ b/python/examples/blazeface/test_client.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from paddle_serving_client import Client
+from paddle_serving_app.reader import *
+import sys
+import numpy as np
+
+preprocess = Sequential([
+    File2Image(),
+    Normalize([104, 117, 123], [127.502231, 127.502231, 127.502231], False)
+])
+
+postprocess = BlazeFacePostprocess("label_list.txt", "output")
+client = Client()
+
+client.load_client_config(sys.argv[1])
+client.connect(['127.0.0.1:9494'])
+
+im_0 = preprocess(sys.argv[2])
+tmp = Transpose((2, 0, 1))
+im = tmp(im_0)
+fetch_map = client.predict(
+    feed={"image": im}, fetch=["detection_output_0.tmp_0"])
+fetch_map["image"] = sys.argv[2]
+fetch_map["im_shape"] = im_0.shape
+postprocess(fetch_map)
diff --git a/python/paddle_serving_app/models/model_list.py b/python/paddle_serving_app/models/model_list.py
index 79b3f91bd6584d17ddbc4124584cf40bd586b965..c5e78c25e554ef0438c90f489a3f726b4f87ceaa 100644
--- a/python/paddle_serving_app/models/model_list.py
+++ b/python/paddle_serving_app/models/model_list.py
@@ -24,7 +24,8 @@ class ServingModels(object):
             "SentimentAnalysis"] = ["senta_bilstm", "senta_bow", "senta_cnn"]
         self.model_dict["SemanticRepresentation"] = ["ernie"]
         self.model_dict["ChineseWordSegmentation"] = ["lac"]
-        self.model_dict["ObjectDetection"] = ["faster_rcnn", "yolov4"]
+        self.model_dict[
+            "ObjectDetection"] = ["faster_rcnn", "yolov4", "blazeface"]
         self.model_dict["ImageSegmentation"] = [
             "unet", "deeplabv3", "deeplabv3+cityscapes"
         ]
diff --git a/python/paddle_serving_app/reader/functional.py b/python/paddle_serving_app/reader/functional.py
index 4240641dd99fceb278ff60a5ba1dbb5275e534aa..7bab279c7f1aa71a2d55a8cb7b12bcb38607eb70 100644
--- a/python/paddle_serving_app/reader/functional.py
+++ b/python/paddle_serving_app/reader/functional.py
@@ -29,6 +29,7 @@ def normalize(img, mean, std, channel_first):
     else:
         img_mean = np.array(mean).reshape((1, 1, 3))
         img_std = np.array(std).reshape((1, 1, 3))
+    img = np.array(img).astype("float32")
     img -= img_mean
     img /= img_std
     return img
diff --git a/python/paddle_serving_app/reader/image_reader.py b/python/paddle_serving_app/reader/image_reader.py
index a44ca5de84da2bafce9b4cea37fb88095debabc6..096f46549af137cb04a87e26a3b28c8d42e33daa 100644
--- a/python/paddle_serving_app/reader/image_reader.py
+++ b/python/paddle_serving_app/reader/image_reader.py
@@ -440,6 +440,30 @@ class RCNNPostprocess(object):
                        self.label_file, self.output_dir)
 
 
+class BlazeFacePostprocess(RCNNPostprocess):
+    def clip_bbox(self, bbox, im_size=None):
+        h = 1. if im_size is None else im_size[0]
+        w = 1. if im_size is None else im_size[1]
+        xmin = max(min(bbox[0], w), 0.)
+        ymin = max(min(bbox[1], h), 0.)
+        xmax = max(min(bbox[2], w), 0.)
+        ymax = max(min(bbox[3], h), 0.)
+        return xmin, ymin, xmax, ymax
+
+    def _get_bbox_result(self, fetch_map, fetch_name, clsid2catid):
+        result = {}
+        is_bbox_normalized = True  # BlazeFace outputs normalized bboxes
+        output = fetch_map[fetch_name]
+        lod = [fetch_map[fetch_name + '.lod']]
+        lengths = self._offset_to_lengths(lod)
+        np_data = np.array(output)
+        result['bbox'] = (np_data, lengths)
+        result['im_id'] = np.array([[0]])
+        result["im_shape"] = np.array(fetch_map["im_shape"]).astype(np.int32)
+        bbox_results = self._bbox2out([result], clsid2catid, is_bbox_normalized)
+        return bbox_results
+
+
 class Sequential(object):
     """
     Args:
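
A note on the `functional.py` change: readers such as `File2Image` may hand `normalize()` an integer image (e.g. the `uint8` array produced by OpenCV decoding), and in-place arithmetic on such a buffer either wraps around or raises a NumPy casting error once the float `std` division runs. The added `img = np.array(img).astype("float32")` cast avoids that. A minimal standalone sketch, with placeholder pixel values and the same mean/std as the BlazeFace example:

```
import numpy as np

# Mimics the body of normalize() for a channel-last (H, W, C) image.
mean = [104, 117, 123]
std = [127.502231, 127.502231, 127.502231]
img_mean = np.array(mean).reshape((1, 1, 3))
img_std = np.array(std).reshape((1, 1, 3))

img = np.full((2, 2, 3), 50, dtype="uint8")  # placeholder uint8 image

# Without the cast: `img -= img_mean` wraps around for pixels below the mean,
# and `img /= img_std` raises because the float64 result cannot be written
# back into a uint8 buffer. With the cast, both in-place ops behave as intended.
img = np.array(img).astype("float32")  # the line added in this diff
img -= img_mean
img /= img_std
print(img.dtype, img[0, 0])  # float32, approx [-0.424 -0.525 -0.573]
```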
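
A note on the new `BlazeFacePostprocess`: it operates directly on the `fetch_map` returned by `client.predict`, after `test_client.py` has attached the original image path and the pre-transpose image shape. The sketch below spells out the dictionary it expects; the key names are taken from the code above, but the array values and the assumed per-row detection layout are illustrative placeholders, not captured model output.

```
import numpy as np
from paddle_serving_app.reader import BlazeFacePostprocess

# Keys mirror test_client.py and _get_bbox_result(); values are made up.
# Running this for real requires test.jpg and label_list.txt in the working
# directory, as in the example above.
fetch_map = {
    # assumed layout: one row per detection, normalized coordinates
    "detection_output_0.tmp_0": np.array(
        [[0., 0.98, 0.31, 0.22, 0.46, 0.41]], dtype="float32"),
    # LoD offsets: one image in the batch, one detection
    "detection_output_0.tmp_0.lod": np.array([0, 1], dtype="int32"),
    "image": "test.jpg",        # path used to draw the boxes
    "im_shape": (480, 640, 3),  # im_0.shape, i.e. (H, W, C) before Transpose
}

postprocess = BlazeFacePostprocess("label_list.txt", "output")
postprocess(fetch_map)  # writes the JSON result and annotated image to "output"
```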