# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
    Host a trained Paddle model with a one-line command
    Example:
        python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
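
    To expose an HTTP web service instead, pass a service name (the name
    "uci" below is only an illustration):

        python -m paddle_serving_server.serve --model ./serving_server_model --port 9292 --name uci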
"""
import argparse
from .web_service import WebService
from flask import Flask, request


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port the server")
    parser.add_argument(
        "--name", type=str, default="None", help="Web service name")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    parser.add_argument(
        "--mem_optim",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim", default=False, action="store_true", help="Graph optimize")
    parser.add_argument(
        "--use_mkl", default=False, action="store_true", help="Use MKL")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    return parser.parse_args()


def start_standard_model():  # pylint: disable=doc-string-missing
    args = parse_args()
    thread_num = args.thread
    model = args.model
    port = args.port
    workdir = args.workdir
    device = args.device
    mem_optim = args.mem_optim
    ir_optim = args.ir_optim
    max_body_size = args.max_body_size
    use_mkl = args.use_mkl

    if model == "":
        print("You must specify your serving model")
        exit(-1)

    import paddle_serving_server as serving
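    # Assemble the standard three-op pipeline: read the request, run
    # inference, and format the response.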
    op_maker = serving.OpMaker()
    read_op = op_maker.create('general_reader')
    general_infer_op = op_maker.create('general_infer')
    general_response_op = op_maker.create('general_response')

    op_seq_maker = serving.OpSeqMaker()
    op_seq_maker.add_op(read_op)
    op_seq_maker.add_op(general_infer_op)
    op_seq_maker.add_op(general_response_op)

    server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.use_mkl(use_mkl)
    server.set_max_body_size(max_body_size)
    server.set_port(port)

    server.load_model_config(model)
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()


from http.server import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process
import json


class MainService(BaseHTTPRequestHandler):
    def _set_headers(self):
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()

    def do_GET(self):
        response = {'status': 'SUCCESS', 'data': 'hello from server'}

        self._set_headers()
        self.wfile.write(json.dumps(response).encode("utf-8"))

    def do_POST(self):
        path = self.path
        print(path)
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        print(post_data)
        # subprocess cannot launch a Python function; start the serving
        # process in the background with multiprocessing instead.
        p = Process(target=start_standard_model)
        p.start()
        response = {"endpoint_list": ["9292"]}
        self._set_headers()
        self.wfile.write(json.dumps(response).encode("utf-8"))
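

# Illustrative helper (not part of the original module): POST to the
# management endpoint above and decode the JSON reply, assuming the server
# listens on localhost at the given port. Standard library only.
def _example_control_request(port=9292):
    import urllib.request
    req = urllib.request.Request(
        "http://127.0.0.1:%d/" % port,
        data=json.dumps({}).encode("utf-8"),
        headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode("utf-8"))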


if __name__ == "__main__":

    args = parse_args()
    if args.name == "None":
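        # No service name given: run the management HTTP server, which can
        # launch the standard RPC model server on demand (see do_POST above).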
        #start_standard_model()
        server = HTTPServer(('', int(args.port)), MainService)
        server.serve_forever()

    else:
        service = WebService(name=args.name)
        service.load_model_config(args.model)
        service.prepare_server(
            workdir=args.workdir, port=args.port, device=args.device)
        service.run_rpc_service()

        app_instance = Flask(__name__)

        @app_instance.before_first_request
        def init():
            service._launch_web_service()

        service_name = "/" + service.name + "/prediction"

        @app_instance.route(service_name, methods=["POST"])
        def run():
            return service.get_prediction(request)

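        # threaded=False together with processes=4 makes the Werkzeug dev
        # server fork worker processes (up to four at once) to serve requests.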
        app_instance.run(host="0.0.0.0",
                         port=service.port,
                         threaded=False,
                         processes=4)
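
# Illustrative only: once the web service is up, predictions are served at
# POST /<name>/prediction. A client sketch using the standard library,
# assuming --name uci and a model whose input is "x" and output is "price"
# (both placeholders for the hosted model's actual feed/fetch names):
#
#     import json, urllib.request
#     payload = {"feed": [{"x": [0.0] * 13}], "fetch": ["price"]}
#     req = urllib.request.Request(
#         "http://127.0.0.1:9292/uci/prediction",
#         data=json.dumps(payload).encode("utf-8"),
#         headers={"Content-Type": "application/json"})
#     print(urllib.request.urlopen(req).read())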