From ece6b883c444588497023d58a792ff9d67b4d2ae Mon Sep 17 00:00:00 2001
From: guru4elephant
Date: Tue, 17 Mar 2020 11:12:48 +0800
Subject: [PATCH] try to add multiple GPU card support for web service

---
 .../paddle_serving_server_gpu/web_service.py | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/python/paddle_serving_server_gpu/web_service.py b/python/paddle_serving_server_gpu/web_service.py
index fbb52470..8c3ae58d 100755
--- a/python/paddle_serving_server_gpu/web_service.py
+++ b/python/paddle_serving_server_gpu/web_service.py
@@ -24,6 +24,10 @@
 import time
 import random
 
+def producers(input_queue, output_queue, endpoint):
+    pass
+
+
 class WebService(object):
     def __init__(self, name="default_service"):
         self.name = name
@@ -100,6 +104,18 @@ class WebService(object):
             time.sleep(1)
 
         service_name = "/" + self.name + "/prediction"
+        input_queues = []
+        output_queue = Queue()
+        for i in range(gpu_num):
+            input_queues.append(Queue())
+
+        @app_instance.route("{}_batch".format(service_name), methods=['POST'])
+        def get_prediction_batch():
+            if not request.json:
+                abort(400)
+            if "fetch" not in request.json:
+                abort(400)
+
         @app_instance.route(service_name, methods=['POST'])
         def get_prediction():
             if not request.json:
--
GitLab
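
Note on the hunks above: this commit only wires up the per-card queues and a `producers` stub; the dispatch from the Flask handlers to the GPU workers is not implemented yet (the `_batch` handler validates the request and then falls through). Below is a minimal sketch of one way such a producer/queue scheme could work, assuming one worker process per GPU card, a shared output queue, and round-robin assignment of requests to cards. The names `predict_on_gpu`, the `(req_id, feed)` message format, and the shutdown sentinel are illustrative assumptions, not part of the Paddle Serving API or of this patch.

    # Sketch only: a possible shape for the per-GPU dispatch stubbed out above.
    from multiprocessing import Process, Queue
    import itertools


    def predict_on_gpu(gpu_id, feed):
        # Placeholder for the real per-card prediction call (e.g. a Paddle
        # Serving client bound to one GPU). Here it simply echoes the input.
        return {"gpu_id": gpu_id, "result": feed}


    def producer(gpu_id, input_queue, output_queue):
        # One worker per GPU card: pull a request from this card's queue,
        # run prediction, and push the result to the shared output queue.
        while True:
            req_id, feed = input_queue.get()
            if feed is None:  # shutdown sentinel
                break
            output_queue.put((req_id, predict_on_gpu(gpu_id, feed)))


    def start_workers(gpu_num):
        # One input queue per card, one shared output queue, one worker each.
        input_queues = [Queue() for _ in range(gpu_num)]
        output_queue = Queue()
        workers = [
            Process(target=producer, args=(i, input_queues[i], output_queue))
            for i in range(gpu_num)
        ]
        for w in workers:
            w.start()
        return input_queues, output_queue, workers


    if __name__ == "__main__":
        gpu_num = 2
        input_queues, output_queue, workers = start_workers(gpu_num)
        rr = itertools.cycle(range(gpu_num))  # round-robin card selection
        for req_id, feed in enumerate([{"x": 1}, {"x": 2}, {"x": 3}]):
            input_queues[next(rr)].put((req_id, feed))
        for _ in range(3):
            print(output_queue.get())
        for q in input_queues:
            q.put((None, None))  # ask each worker to exit
        for w in workers:
            w.join()

Per-card input queues keep each device's work ordered and bounded, while the single output queue lets the web handler collect results without caring which card produced them; in the patch, the `_batch` route would presumably put the parsed request on one of `input_queues` and wait on `output_queue` for the matching result.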