# coding:utf-8
# Copyright (c) 2019  PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
走神的阿圆's avatar
走神的阿圆 已提交
22 23
import platform
import socket
走神的阿圆's avatar
走神的阿圆 已提交
24 25 26
import json
import paddlehub as hub
from paddlehub.commands.base_command import BaseCommand, ENTRY
27
from paddlehub.serving import app_single as app
走神的阿圆's avatar
走神的阿圆 已提交
28 29
from paddlehub.common.dir import CONF_HOME
from paddlehub.common.hub_server import CacheUpdater
30
import multiprocessing
走神的阿圆's avatar
走神的阿圆 已提交
31 32
import time
import signal
33

走神的阿圆's avatar
走神的阿圆 已提交
34
if platform.system() == "Windows":

    class StandaloneApplication(object):
        """No-op placeholder on Windows, where gunicorn is unavailable.

        The Windows code paths start the service with ``app.run`` instead,
        so this class is never actually instantiated there.
        """

        def __init__(self):
            pass

        def load_config(self):
            pass

        def load(self):
            pass
else:
    import gunicorn.app.base

    class StandaloneApplication(gunicorn.app.base.BaseApplication):
        """Embed a WSGI application inside a gunicorn master process.

        ``options`` is a mapping of gunicorn settings (e.g. ``bind``,
        ``workers``) forwarded to the server configuration.
        """

        def __init__(self, app, options=None):
            self.options = options or {}
            self.application = app
            super(StandaloneApplication, self).__init__()

        def load_config(self):
            # Forward only the options gunicorn actually recognizes and
            # that carry a real value; everything else is ignored.
            for name, value in self.options.items():
                if value is not None and name in self.cfg.settings:
                    self.cfg.set(name.lower(), value)

        def load(self):
            # Called by gunicorn to obtain the WSGI callable.
            return self.application


def number_of_workers():
    """Return the default gunicorn worker count: ``2 * CPUs + 1``."""
    return 2 * multiprocessing.cpu_count() + 1

def pid_is_exist(pid):
    """Return True if a process with the given pid currently exists.

    Uses ``os.kill(pid, 0)``, which delivers no signal but raises if the
    pid is gone. Narrowed from a bare ``except`` to ``OSError`` so
    unrelated errors (e.g. a non-int pid -> TypeError) are not silently
    reported as "process does not exist".
    """
    try:
        os.kill(pid, 0)
    except OSError:
        # ProcessLookupError (no such pid) or PermissionError (exists but
        # not signalable) -- both subclasses of OSError.
        return False
    else:
        return True


class ServingCommand(BaseCommand):
    """Implement ``hub serving``: start or stop a PaddleHub-Serving
    service (single-process Flask or multi-process gunicorn) and start
    Bert-Service instances.
    """
    name = "serving"
    module_list = []

    def __init__(self, name):
        super(ServingCommand, self).__init__(name)
        self.show_in_help = True
        self.description = "Start a service for online predicting by using PaddleHub."
        self.parser = argparse.ArgumentParser(
            description=self.__class__.__doc__,
            prog='%s %s [COMMAND]' % (ENTRY, name),
            usage='%(prog)s',
            add_help=True)
        # Positionals: "serving", then "start"/"stop", then an optional
        # "bert_service" discriminator.
        self.parser.add_argument("command")
        self.parser.add_argument("sub_command")
        self.parser.add_argument("bert_service", nargs="?")
        self.sub_parse = self.parser.add_mutually_exclusive_group(
            required=False)
        self.parser.add_argument(
            "--use_gpu", action="store_true", default=False)
        self.parser.add_argument(
            "--use_multiprocess", action="store_true", default=False)
        self.parser.add_argument("--modules", "-m", nargs="+")
        self.parser.add_argument("--config", "-c", nargs="?")
        # NOTE: the default port is the int 8866, while a user-supplied
        # value arrives as a str; convert with str()/int() before use.
        self.parser.add_argument("--port", "-p", nargs="?", default=8866)
        self.parser.add_argument("--gpu", "-i", nargs="?", default=0)

    def dump_pid_file(self):
        """Record pid, served modules and start time in
        ``CONF_HOME/serving_<port>.json`` so ``hub serving stop`` can
        find this process later."""
        pid = os.getpid()
        # BUG FIX: str() is required -- with the default port (int 8866)
        # the original "serving_" + self.args.port raised TypeError.
        filepath = os.path.join(CONF_HOME,
                                "serving_" + str(self.args.port) + ".json")
        if os.path.exists(filepath):
            os.remove(filepath)
        with open(filepath, "w") as fp:
            info = {
                "pid": pid,
                "module": self.args.modules,
                "start_time": time.time()
            }
            json.dump(info, fp)

    @staticmethod
    def load_pid_file(filepath, port=None):
        """Load the info dict written by ``dump_pid_file``.

        Returns the dict, or False (with a message) if the file does not
        exist. If ``port`` is None it is parsed from the filename.
        """
        if port is None:
            port = os.path.basename(filepath).split(".")[0].split("_")[1]
        if not os.path.exists(filepath):
            print(
                "PaddleHub-Serving config file is not exists, "
                "please confirm the port [%s] you specified is correct." % port)
            return False
        with open(filepath, "r") as fp:
            info = json.load(fp)
            return info

    def stop_serving(self, port):
        """Stop the PaddleHub-Serving process listening on ``port``."""
        filepath = os.path.join(CONF_HOME, "serving_" + str(port) + ".json")
        info = self.load_pid_file(filepath, port)
        if info is False:
            return
        pid = info["pid"]
        module = info["module"]
        start_time = info["start_time"]
        if os.path.exists(filepath):
            os.remove(filepath)

        if not pid_is_exist(pid):
            print("PaddleHub-Serving has been stopped.")
            return
        print("PaddleHub-Serving will stop.")
        CacheUpdater(
            "hub_serving_stop",
            module=module,
            addition={
                "period_time": time.time() - start_time
            }).start()
        if platform.system() == "Windows":
            os.kill(pid, signal.SIGTERM)
        else:
            # NOTE(review): assumes `pid` is a process-group leader (the
            # gunicorn master), so killpg stops the whole worker group.
            os.killpg(pid, signal.SIGTERM)

    @staticmethod
    def start_bert_serving(args):
        """Start a Bert-Service instance (Linux only)."""
        if platform.system() != "Linux":
            print("Error. Bert-Service only support linux.")
            return False

        if ServingCommand.is_port_occupied("127.0.0.1", args.port) is True:
            print("Port %s is occupied, please change it." % args.port)
            return False

        # Imported lazily: paddle_gpu_serving is an optional dependency.
        from paddle_gpu_serving.run import BertServer
        bs = BertServer(with_gpu=args.use_gpu)
        bs.with_model(model_name=args.modules[0])
        bs.run(gpu_index=args.gpu, port=int(args.port))

    @staticmethod
    def is_port_occupied(ip, port):
        """Return True if a TCP connection to (ip, port) succeeds."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, int(port)))
            s.shutdown(2)
            return True
        except OSError:
            # Connection refused / timed out -> nothing listening there.
            return False
        finally:
            # BUG FIX: the probe socket leaked on the failure path.
            s.close()

    @staticmethod
    def preinstall_modules(modules):
        """Install each module ("name" or "name==version") and return a
        list of serving-config dicts. Exits the process on any failure."""
        configs = []
        module_exist = {}
        if modules is not None:
            for module in modules:
                module_name = module if "==" not in module else \
                    module.split("==")[0]
                module_version = None if "==" not in module else \
                    module.split("==")[1]
                if module_exist.get(module_name, "") != "":
                    # Duplicate module name: keep only the last spec.
                    print(module_name, "==", module_exist.get(module_name),
                          " will be ignored cause new version is specified.")
                    configs.pop()
                module_exist.update({module_name: module_version})
                try:
                    CacheUpdater(
                        "hub_serving_start",
                        module=module_name,
                        version=module_version).start()
                    m = hub.Module(name=module_name, version=module_version)
                    # A module without a default_signature cannot serve
                    # predictions.
                    method_name = m.desc.attr.map.data['default_signature'].s
                    if method_name == "":
                        raise RuntimeError("{} cannot be use for "
                                           "predicting".format(module_name))
                    configs.append({
                        "module": module_name,
                        "version": m.version,
                        "category": str(m.type).split("/")[0].upper()
                    })
                except Exception as err:
                    print(err, ", start Hub-Serving unsuccessfully.")
                    exit(1)
            return configs

    def start_app_with_file(self, configs, workers):
        """Start a gunicorn-based service from a config-file dict."""
        port = configs.get("port", 8866)
        if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
            print("Port %s is occupied, please change it." % port)
            return False

        modules = configs.get("modules_info")
        module = [str(i["module"]) + "==" + str(i["version"]) for i in modules]
        module_info = ServingCommand.preinstall_modules(module)
        # Merge the freshly-installed module info back into the file config.
        for index in range(len(module_info)):
            modules[index].update(module_info[index])
        options = {
            "bind": "0.0.0.0:%s" % port,
            "workers": workers,
            "pid": "./pid.txt"
        }

        configs["modules_info"] = modules
        self.dump_pid_file()
        StandaloneApplication(
            app.create_app(init_flag=False, configs=configs), options).run()

    def start_single_app_with_file(self, configs):
        """Start a single-process Flask service from a config-file dict."""
        use_gpu = configs.get("use_gpu", False)
        port = configs.get("port", 8866)
        if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
            print("Port %s is occupied, please change it." % port)
            return False
        configs = configs.get("modules_info")
        module = [str(i["module"]) + "==" + str(i["version"]) for i in configs]
        module_info = ServingCommand.preinstall_modules(module)
        for index in range(len(module_info)):
            configs[index].update(module_info[index])
        self.dump_pid_file()
        app.run(use_gpu, configs=configs, port=port)

    @staticmethod
    def start_multi_app_with_file(configs):
        """Start a gunicorn service directly from a config-file dict
        (no module pre-install; worker count from the file)."""
        port = configs.get("port", 8866)
        if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
            print("Port %s is occupied, please change it." % port)
            return False
        workers = configs.get("workers", number_of_workers())
        options = {"bind": "0.0.0.0:%s" % port, "workers": workers}
        StandaloneApplication(
            app.create_app(init_flag=False, configs=configs), options).run()
        print("PaddleHub-Serving has been stopped.")

    def start_app_with_args(self, workers):
        """Start a gunicorn service from command-line arguments."""
        module = self.args.modules
        if module is not None:
            use_gpu = self.args.use_gpu
            port = self.args.port
            if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                print("Port %s is occupied, please change it." % port)
                return False
            module_info = ServingCommand.preinstall_modules(module)
            # Plain loop instead of a side-effecting list comprehension.
            for item in module_info:
                item.update({"batch_size": 1, "queue_size": 20})
            options = {"bind": "0.0.0.0:%s" % port, "workers": workers}
            configs = {"use_gpu": use_gpu, "modules_info": module_info}
            self.dump_pid_file()
            StandaloneApplication(
                app.create_app(init_flag=False, configs=configs),
                options).run()
        else:
            print("Lack of necessary parameters!")

    def start_single_app_with_args(self):
        """Start a single-process Flask service from command-line args."""
        module = self.args.modules
        if module is not None:
            use_gpu = self.args.use_gpu
            port = self.args.port
            if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                print("Port %s is occupied, please change it." % port)
                return False
            module_info = ServingCommand.preinstall_modules(module)
            for item in module_info:
                item.update({"batch_size": 1, "queue_size": 20})
            self.dump_pid_file()
            app.run(use_gpu, configs=module_info, port=port)
        else:
            print("Lack of necessary parameters!")

    def start_multi_app_with_args(self):
        """Start a gunicorn service from command-line args with the
        default worker count."""
        module = self.args.modules
        if module is not None:
            use_gpu = self.args.use_gpu
            port = self.args.port
            workers = number_of_workers()
            if ServingCommand.is_port_occupied("127.0.0.1", port) is True:
                print("Port %s is occupied, please change it." % port)
                return False
            module_info = ServingCommand.preinstall_modules(module)
            for item in module_info:
                item.update({"batch_size": 1, "queue_size": 20})
            options = {"bind": "0.0.0.0:%s" % port, "workers": workers}
            configs = {"use_gpu": use_gpu, "modules_info": module_info}
            StandaloneApplication(
                app.create_app(init_flag=False, configs=configs),
                options).run()
            print("PaddleHub-Serving has been stopped.")
        else:
            print("Lack of necessary parameters!")

    def start_serving(self):
        """Dispatch to the right start mode based on --config,
        --use_multiprocess and the current platform."""
        config_file = self.args.config
        if config_file is not None:
            if os.path.exists(config_file):
                with open(config_file, "r") as fp:
                    configs = json.load(fp)
                    use_multiprocess = configs.get("use_multiprocess", False)
                    if platform.system() == "Windows":
                        print(
                            "Warning: Windows cannot use multiprocess working "
                            "mode, Hub-Serving will switch to single process mode"
                        )
                        # BUG FIX: start_single_app_with_file is an instance
                        # method; calling it through the class bound `configs`
                        # to `self` and raised TypeError.
                        self.start_single_app_with_file(configs)
                    else:
                        if use_multiprocess is True:
                            self.start_app_with_file(configs,
                                                     number_of_workers())
                        else:
                            self.start_app_with_file(configs, 1)
            else:
                print("config_file ", config_file, "not exists.")
        else:
            if platform.system() == "Windows":
                print("Warning: Windows cannot use multiprocess working "
                      "mode, Hub-Serving will switch to single process mode")
                self.start_single_app_with_args()
            else:
                if self.args.use_multiprocess is True:
                    self.start_app_with_args(number_of_workers())
                else:
                    self.start_app_with_args(1)

    @staticmethod
    def show_help():
        """Print usage for `hub serving`."""
        # Renamed the accumulator from `str`, which shadowed the builtin.
        help_str = "serving <option>\n"
        help_str += "\tManage PaddleHub-Serving.\n"
        help_str += "sub command:\n"
        help_str += "1. start\n"
        help_str += "\tStart PaddleHub-Serving if specifies this parameter.\n"
        help_str += "2. start bert_service\n"
        help_str += "\tStart Bert Service if specifies this parameter.\n"
        help_str += "[start] option:\n"
        help_str += "--modules/-m [module1==version, module2==version...]\n"
        help_str += "\tPre-install modules via this parameter list.\n"
        help_str += "--port/-p XXXX\n"
        help_str += "\tUse port XXXX for serving.\n"
        help_str += "--use_gpu\n"
        help_str += "\tUse gpu for predicting if specifies this parameter.\n"
        help_str += "--use_multiprocess\n"
        # Typo fix: "multoprocess ... cannot be use" in the original text.
        help_str += "\tChoose multiprocess mode, cannot be used on Windows.\n"
        help_str += "--config/-c file_path\n"
        help_str += "\tUse configs in file to starting paddlehub serving. "
        help_str += "Other parameter will be ignored if specifies this parameter.\n"
        help_str += "[start bert_service] option:\n"
        help_str += "--modules/-m\n"
        help_str += "\tPre-install modules via this parameter.\n"
        help_str += "--port/-p XXXX\n"
        help_str += "\tUse port XXXX for serving.\n"
        help_str += "--use_gpu\n"
        help_str += "\tUse gpu for predicting if specifies this parameter.\n"
        help_str += "--gpu\n"
        help_str += "\tSpecify the graphics card to use.\n"
        print(help_str)

    def execute(self, argv):
        """CLI entry point.

        NOTE(review): `argv` is unused -- argparse reads sys.argv directly;
        the parameter is kept for interface compatibility with BaseCommand.
        """
        try:
            self.args = self.parser.parse_args()
        except SystemExit:
            # argparse raises SystemExit on a parse error (narrowed from a
            # bare except); show usage instead of exiting.
            ServingCommand.show_help()
            return False
        if self.args.sub_command == "start":
            if self.args.bert_service == "bert_service":
                ServingCommand.start_bert_serving(self.args)
            elif self.args.bert_service is None:
                self.start_serving()
            else:
                ServingCommand.show_help()
        elif self.args.sub_command == "stop":
            if self.args.bert_service == "bert_service":
                print("Please stop bert_service by kill process by yourself")
            elif self.args.bert_service is None:
                self.stop_serving(port=self.args.port)
        else:
            ServingCommand.show_help()


command = ServingCommand.instance()