Commit 4799f112 authored by MRXLT

update imagenet demo

Parent e6ff806e
@@ -12,17 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from paddle_serving_server.plugin_service import PluginService
+from paddle_serving_server.web_service import WebService
 import sys
 import cv2
 import base64
 from PIL import Image
 from StringIO import StringIO
 import numpy as np
-from image_server import start_serving

-class ImageService(PluginService):
+class ImageService(WebService):
     def set_param(self):
         self.image_mean = [0.485, 0.456, 0.406]
         self.image_std = [0.229, 0.224, 0.225]
@@ -115,5 +112,7 @@ class ImageService(PluginService):
         return res_feed, fetch

-image_service = ImageService(name="image", model=sys.argv[1], port=9291)
-image_service.start_service()
+image_service = ImageService(name="image")
+image_service.load_model_config(sys.argv[1])
+image_service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu")
+image_service.run_server()
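With the switch from PluginService to WebService, the demo no longer starts through image_server.start_serving; the service object itself loads the model config and brings up the HTTP server. A minimal sketch of that pattern, assuming a paddle_serving_server build where WebService exposes the load_model_config / prepare_server / run_server calls used above; the preprocess signature mirrors this demo, and MinimalImageService is a hypothetical name:

```python
import sys
from paddle_serving_server.web_service import WebService

class MinimalImageService(WebService):
    def preprocess(self, feed={}, fetch=[]):
        # The real demo decodes the base64 image here and normalizes it
        # with image_mean / image_std before returning the feed dict.
        return feed, fetch

service = MinimalImageService(name="image")
service.load_model_config(sys.argv[1])  # model config directory
service.prepare_server(workdir=sys.argv[2], port=9393, device="cpu")
service.run_server()
```

Launched as `python image_service.py <model_config> <workdir>` (the script name is an assumption), this serves predictions at http://127.0.0.1:9393/image/prediction, the URL the updated client below targets.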
@@ -21,7 +21,7 @@ import os
 import numpy as np

-def predict(image_path):
+def predict(image_path, server):
     image = open(image_path).read()
     image = base64.b64encode(image)
@@ -30,20 +30,19 @@ def predict(image_path):
     req["fetch"] = ["score"]
     req = json.dumps(req)
-    url = "http://127.0.0.1:9291/image/prediction"
+    url = server
     headers = {"Content-Type": "application/json"}
     r = requests.post(url, data=req, headers=headers)
     if r.status_code == requests.codes.ok:
         score = r.json()["score"]
         score = np.array(score)
-        print("max score : {} class {}".format(np.max(score), np.argmax(score)))
+        print("picture {} max score : {} class {}".format(
+            image_path, np.max(score), np.argmax(score)))
     else:
         print("predict {} error".format(image_path))

 if __name__ == "__main__":
-    folder = "./to_longteng/n01440764"
-    file_list = os.listdir(folder)
-    for f in file_list:
-        image_path = folder + "/" + f
-        predict(image_path)
+    server = "http://127.0.0.1:9393/image/prediction"
+    image_path = "./data/n01440764_10026.JPEG"
+    predict(image_path, server)
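The old __main__ block walked an entire folder of images, while the updated one scores a single file. If batch behavior is still wanted, the same loop can be rebuilt on top of the new predict(image_path, server) signature; a sketch, with the folder path as a placeholder:

```python
import os

server = "http://127.0.0.1:9393/image/prediction"
folder = "./data"  # placeholder: any directory of JPEG images
for f in os.listdir(folder):
    predict(os.path.join(folder, f), server)
```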
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from paddle_serving_server import OpMaker
from paddle_serving_server import OpSeqMaker
from paddle_serving_server import Server
def start_serving():
    # Build the op sequence: read input -> run inference -> build response
    op_maker = OpMaker()
    read_op = op_maker.create('general_reader')
    general_infer_op = op_maker.create('general_infer')
    general_response_op = op_maker.create('general_response')

    op_seq_maker = OpSeqMaker()
    op_seq_maker.add_op(read_op)
    op_seq_maker.add_op(general_infer_op)
    op_seq_maker.add_op(general_response_op)

    server = Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(24)
    server.load_model_config(sys.argv[1])  # model config directory
    port = int(sys.argv[2])                # serving port
    server.prepare_server(workdir="work_dir1", port=port, device="cpu")
    server.run_server()


if __name__ == "__main__":
    start_serving()
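This new script serves the model over RPC rather than HTTP, so it is queried with the Paddle Serving client library instead of plain requests. A rough sketch of such a client, assuming the paddle_serving_client API of this era; the client config path and the "image"/"score" feed/fetch names are assumptions taken from the demo:

```python
import numpy as np
from paddle_serving_client import Client

client = Client()
client.load_client_config("serving_client_conf/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9393"])

# Placeholder input: a preprocessed 3x224x224 image tensor.
img = np.random.rand(3 * 224 * 224).astype("float32")
fetch_map = client.predict(feed={"image": img}, fetch=["score"])
print(fetch_map["score"])
```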