# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
    Host a trained Paddle model with a one-line command
    Example:
        python -m paddle_serving_server.serve --model ./serving_server_model --port 9292
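
        Additionally expose an HTTP endpoint by passing a service name ("uci"
        below is only an example name; the endpoint becomes /uci/prediction):

        python -m paddle_serving_server.serve --model ./serving_server_model --port 9292 --name uci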
"""
import argparse
import sys
import json
import base64
import time
from multiprocessing import Process
from .web_service import WebService, port_is_available
from flask import Flask, request
if sys.version_info.major == 2:
    from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
elif sys.version_info.major == 3:
    from http.server import BaseHTTPRequestHandler, HTTPServer


def parse_args():  # pylint: disable=doc-string-missing
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", nargs="+", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port the server")
    parser.add_argument(
        "--name", type=str, default="None", help="Web service name")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Type of device")
    parser.add_argument(
        "--mem_optim_off",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim", default=False, action="store_true", help="Graph optimize")
    parser.add_argument(
        "--use_mkl", default=False, action="store_true", help="Use MKL")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    parser.add_argument(
        "--use_encryption_model",
        default=False,
        action="store_true",
        help="Use encryption model")
    parser.add_argument(
        "--use_multilang",
        default=False,
        action="store_true",
        help="Use Multi-language-service")
    parser.add_argument(
        "--product_name",
        type=str,
        default=None,
        help="product_name for authentication")
    parser.add_argument(
        "--container_id",
        type=str,
        default=None,
        help="container_id for authentication")
    return parser.parse_args()


def start_standard_model(serving_port):  # pylint: disable=doc-string-missing
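    # Build the op sequence (general_reader, one infer op per model,
    # general_response) from the parsed CLI args and start the RPC server
    # on serving_port.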
    args = parse_args()
    thread_num = args.thread
    model = args.model
    port = serving_port
    workdir = args.workdir
    device = args.device
    mem_optim = not args.mem_optim_off
    ir_optim = args.ir_optim
    max_body_size = args.max_body_size
    use_mkl = args.use_mkl
    use_encryption_model = args.use_encryption_model
    use_multilang = args.use_multilang

    if model == "":
        print("You must specify your serving model")
        sys.exit(-1)

    import paddle_serving_server as serving
    op_maker = serving.OpMaker()
    op_seq_maker = serving.OpSeqMaker()

    read_op = op_maker.create('general_reader')
    op_seq_maker.add_op(read_op)

    for idx, single_model in enumerate(model):
        # With two models, the first one is wrapped as a detection op and the
        # second as a regular inference op; otherwise every model is an infer op.
        if len(model) == 2 and idx == 0:
            infer_op_name = "general_detection"
        else:
            infer_op_name = "general_infer"
        general_infer_op = op_maker.create(infer_op_name)
        op_seq_maker.add_op(general_infer_op)

    general_response_op = op_maker.create('general_response')
    op_seq_maker.add_op(general_response_op)

    server = None
    if use_multilang:
        server = serving.MultiLangServer()
    else:
        server = serving.Server()
    server.set_op_sequence(op_seq_maker.get_op_sequence())
    server.set_num_threads(thread_num)
    server.set_memory_optimize(mem_optim)
    server.set_ir_optimize(ir_optim)
    server.use_mkl(use_mkl)
    server.set_max_body_size(max_body_size)
    server.set_port(port)
    server.use_encryption_model(use_encryption_model)
    if args.product_name is not None:
        server.set_product_name(args.product_name)
    if args.container_id is not None:
        server.set_container_id(args.container_id)

    server.load_model_config(model)
    server.prepare_server(workdir=workdir, port=port, device=device)
    server.run_server()


class MainService(BaseHTTPRequestHandler):
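    # MainService handles the key exchange for encrypted models: the client
    # POSTs a base64-encoded key, the key is written next to the model files,
    # and a serving subprocess is launched on a free port that is returned
    # as "endpoint_list".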
    def get_available_port(self):
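        # Scan ports 12000-12999 and return the first free one (None if all
        # are taken).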
        default_port = 12000
        for i in range(1000):
            if port_is_available(default_port + i):
                return default_port + i

    def start_serving(self):
        start_standard_model(serving_port)

    def get_key(self, post_data):
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            # args.model is a list (nargs="+"), so write the key into every
            # model directory.
            for single_model in args.model:
                with open(single_model + "/key", "wb") as f:
                    f.write(key)
            return True

    def check_key(self, post_data):
        if "key" not in post_data:
            return False
        else:
            key = base64.b64decode(post_data["key"].encode())
            # Check the key against the one stored in each model directory.
            for single_model in args.model:
                with open(single_model + "/key", "rb") as f:
                    if key != f.read():
                        return False
            return True

    def start(self, post_data):
        post_data = json.loads(post_data.decode('utf-8'))
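        # p_flag tracks whether the serving subprocess has already been
        # launched; the first valid request starts it, later requests only
        # re-verify the key.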
        global p_flag
        if not p_flag:
            if args.use_encryption_model:
                print("waiting key for model")
                if not self.get_key(post_data):
                    print("not found key in request")
                    return False
            global serving_port
            global p
            serving_port = self.get_available_port()
            p = Process(target=self.start_serving)
            p.start()
            time.sleep(3)
            if p.is_alive():
                p_flag = True
            else:
                return False
        else:
            if p.is_alive():
                if not self.check_key(post_data):
                    return False
            else:
                return False
        return True

    def do_POST(self):
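        # The HTTP status is always 200; success or failure is reported in
        # the JSON body ("endpoint_list" on success, "message" on failure).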
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        if self.start(post_data):
            response = {"endpoint_list": [serving_port]}
        else:
            response = {"message": "start serving failed"}
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
        self.wfile.write(json.dumps(response).encode())


if __name__ == "__main__":

    args = parse_args()
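    # Launch modes: with --name None and --use_encryption_model, an HTTP
    # key-exchange server is started; with --name None alone, the standard
    # RPC server is started directly; any other --name starts an RPC server
    # plus a Flask web service exposing /<name>/prediction.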
    if args.name == "None":
        if args.use_encryption_model:
            p_flag = False
            p = None
            serving_port = 0
            server = HTTPServer(('localhost', int(args.port)), MainService)
            print(
                'Starting encryption server, waiting for key from client, use <Ctrl-C> to stop'
            )
            server.serve_forever()
        else:
            start_standard_model(args.port)
    else:
        service = WebService(name=args.name)
        service.load_model_config(args.model)
        service.prepare_server(
            workdir=args.workdir, port=args.port, device=args.device)
        service.run_rpc_service()

        app_instance = Flask(__name__)
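        # The Flask app launches the backend web service before handling the
        # first request and forwards POSTs on /<name>/prediction to it.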

        @app_instance.before_first_request
        def init():
            service._launch_web_service()

        service_name = "/" + service.name + "/prediction"

        @app_instance.route(service_name, methods=["POST"])
        def run():
            return service.get_prediction(request)

        app_instance.run(host="0.0.0.0",
                         port=service.port,
                         threaded=False,
                         processes=4)