Commit f5a809b5 authored by MRXLT

add run_flask && fix HTTP demo

Parent 61ed03a0
@@ -36,3 +36,4 @@ bert_service.set_gpus(gpu_ids)
 bert_service.prepare_server(
     workdir="workdir", port=int(sys.argv[2]), device="gpu")
 bert_service.run_server()
+bert_service.run_flask()
@@ -47,3 +47,4 @@ image_service.load_model_config(sys.argv[1])
 image_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
 image_service.run_server()
+image_service.run_flask()
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from paddle_serving_server_gpu.web_service import WebService
 import sys
 import cv2
 import base64
 import numpy as np
 from image_reader import ImageReader
+from paddle_serving_server_gpu.web_service import WebService


 class ImageService(WebService):
@@ -49,3 +49,4 @@ image_service.set_gpus("0,1")
 image_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="gpu")
 image_service.run_server()
+image_service.run_flask()
@@ -39,3 +39,4 @@ imdb_service.prepare_server(
     workdir=sys.argv[2], port=int(sys.argv[3]), device="cpu")
 imdb_service.prepare_dict({"dict_file_path": sys.argv[4]})
 imdb_service.run_server()
+imdb_service.run_flask()
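
All four demo scripts above now finish with the same launch sequence. For reference, that pattern consolidated into a standalone sketch, assuming the CPU paddle_serving_server package; the service name, model directory, and port below are placeholders rather than values from this commit:

# Hedged sketch of the common demo pattern after this commit; the name,
# model directory, and port are illustrative placeholders.
from paddle_serving_server.web_service import WebService

service = WebService(name="demo")
service.load_model_config("serving_server_model")  # placeholder model dir
service.prepare_server(workdir="workdir", port=9393, device="cpu")
service.run_server()  # launches the RPC worker in a separate process
service.run_flask()   # new in this commit: blocks and serves HTTP requests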
@@ -108,6 +108,24 @@ class WebService(object):
         p_rpc = Process(target=self._launch_rpc_service)
         p_rpc.start()

+    def run_flask(self):
+        app_instance = Flask(__name__)
+
+        @app_instance.before_first_request
+        def init():
+            self._launch_web_service()
+
+        service_name = "/" + self.name + "/prediction"
+
+        @app_instance.route(service_name, methods=["POST"])
+        def run():
+            return self.get_prediction(request)
+
+        app_instance.run(host="0.0.0.0",
+                         port=self.port,
+                         threaded=False,
+                         processes=4)
+
     def preprocess(self, feed={}, fetch=[]):
         return feed, fetch
...
@@ -143,6 +143,24 @@ class WebService(object):
         for p in server_pros:
             p.start()

+    def run_flask(self):
+        app_instance = Flask(__name__)
+
+        @app_instance.before_first_request
+        def init():
+            self._launch_web_service()
+
+        service_name = "/" + self.name + "/prediction"
+
+        @app_instance.route(service_name, methods=["POST"])
+        def run():
+            return self.get_prediction(request)
+
+        app_instance.run(host="0.0.0.0",
+                         port=self.port,
+                         threaded=False,
+                         processes=4)
+
     def preprocess(self, feed={}, fetch=[]):
         return feed, fetch
...
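
Once run_flask() is serving, a client can POST to the /<name>/prediction route registered above. A hedged client sketch using the requests library; the host, port, service name, and the feed/fetch payload shape are assumptions for illustration, not part of this commit:

# Hedged client sketch for the Flask route added by run_flask().
# The URL and the payload keys below are illustrative assumptions.
import requests

url = "http://127.0.0.1:9393/demo/prediction"  # placeholder host/port/name
payload = {"feed": {"words": "an example input"}, "fetch": ["prediction"]}
resp = requests.post(url, json=payload)
print(resp.status_code, resp.json())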