#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import tarfile
import socket
import paddle_serving_server_gpu as paddle_serving_server
import time
from .version import serving_server_version
from contextlib import closing
import argparse
import collections
import fcntl


def serve_args():
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port of the service")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="gpu", help="Type of device")
    parser.add_argument(
        "--gpu_ids", type=str, default="", help="Comma-separated gpu ids")
    parser.add_argument(
        "--name", type=str, default="None", help="Default service name")
    parser.add_argument(
        "--mem_optim",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim",
        default=False,
        action="store_true",
        help="Graph optimize")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit the size of request/response messages, in bytes")
    return parser.parse_args()
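
# A minimal launch sketch (hedged: assumes this package's usual `serve`
# entry point; the model path and gpu ids below are placeholders):
#
#   python -m paddle_serving_server_gpu.serve \
#       --model serving_server_model --port 9292 --gpu_ids 0 --thread 10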


class OpMaker(object):
    def __init__(self):
        self.op_dict = {
            "general_infer": "GeneralInferOp",
            "general_reader": "GeneralReaderOp",
            "general_response": "GeneralResponseOp",
            "general_text_reader": "GeneralTextReaderOp",
            "general_text_response": "GeneralTextResponseOp",
            "general_single_kv": "GeneralSingleKVOp",
            "general_dist_kv_infer": "GeneralDistKVInferOp",
            "general_dist_kv": "GeneralDistKVOp"
        }
        self.node_name_suffix_ = collections.defaultdict(int)

    def create(self, node_type, engine_name=None, inputs=None, outputs=None):
        if node_type not in self.op_dict:
            raise Exception("Op type {} is not supported right now".format(
                node_type))
        node = server_sdk.DAGNode()
        # node.name will be used as the infer engine name
        if engine_name:
            node.name = engine_name
        else:
            node.name = '{}_{}'.format(node_type,
                                       self.node_name_suffix_[node_type])
            self.node_name_suffix_[node_type] += 1

        node.type = self.op_dict[node_type]
        if inputs:
            for dep_node_str in inputs:
                dep_node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(dep_node_str, dep_node)
                dep = server_sdk.DAGNodeDependency()
                dep.name = dep_node.name
                dep.mode = "RO"
                node.dependencies.extend([dep])
        # Because the return value will be used as a dict key, and the
        # proto object is mutable and therefore unhashable, it is
        # serialized to a string here. This has little effect on
        # overall efficiency.
        return google.protobuf.text_format.MessageToString(node)


class OpSeqMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        if len(node.dependencies) > 1:
            raise Exception(
                'Setting more than one predecessor for an op in OpSeqMaker is not allowed.'
            )
        if len(self.workflow.nodes) >= 1:
            if len(node.dependencies) == 0:
                dep = server_sdk.DAGNodeDependency()
                dep.name = self.workflow.nodes[-1].name
                dep.mode = "RO"
                node.dependencies.extend([dep])
            elif len(node.dependencies) == 1:
                if node.dependencies[0].name != self.workflow.nodes[-1].name:
                    raise Exception(
                        'You must add ops in order in OpSeqMaker. The previous op is {}, but the current op depends on {}.'.
                        format(self.workflow.nodes[-1].name, node.dependencies[
                            0].name))
        self.workflow.nodes.extend([node])

    def get_op_sequence(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf
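
# A minimal composition sketch (hedged: assumes the common
# reader -> infer -> response pipeline built from the op_dict above):
#
#   op_maker = OpMaker()
#   read_op = op_maker.create('general_reader')
#   infer_op = op_maker.create('general_infer')
#   response_op = op_maker.create('general_response')
#
#   op_seq_maker = OpSeqMaker()
#   op_seq_maker.add_op(read_op)
#   op_seq_maker.add_op(infer_op)
#   op_seq_maker.add_op(response_op)
#   workflow_conf = op_seq_maker.get_op_sequence()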


class OpGraphMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        # Currently, SDK only supports "Sequence"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        self.workflow.nodes.extend([node])

    def get_op_graph(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf


class Server(object):
    def __init__(self):
        self.server_handle_ = None
        self.infer_service_conf = None
        self.model_toolkit_conf = None
        self.resource_conf = None
        self.memory_optimization = False
        self.ir_optimization = False
        self.model_conf = None
        self.workflow_fn = "workflow.prototxt"
        self.resource_fn = "resource.prototxt"
        self.infer_service_fn = "infer_service.prototxt"
        self.model_toolkit_fn = "model_toolkit.prototxt"
        self.general_model_config_fn = "general_model.prototxt"
        self.cube_config_fn = "cube.conf"
        self.workdir = ""
        self.max_concurrency = 0
        self.num_threads = 4
        self.port = 8080
        self.reload_interval_s = 10
        self.max_body_size = 64 * 1024 * 1024
        self.module_path = os.path.dirname(paddle_serving_server.__file__)
        self.cur_path = os.getcwd()
        self.use_local_bin = False
        self.gpuid = 0
        self.model_config_paths = None  # for multi-model in a workflow

    def set_max_concurrency(self, concurrency):
        self.max_concurrency = concurrency

    def set_num_threads(self, threads):
        self.num_threads = threads

    def set_max_body_size(self, body_size):
        if body_size >= self.max_body_size:
            self.max_body_size = body_size
        else:
            print(
                "max_body_size is less than the default value, the default value will be used."
            )

    def set_port(self, port):
        self.port = port

    def set_reload_interval(self, interval):
        self.reload_interval_s = interval

    def set_op_sequence(self, op_seq):
        self.workflow_conf = op_seq

    def set_op_graph(self, op_graph):
        self.workflow_conf = op_graph

    def set_memory_optimize(self, flag=False):
        self.memory_optimization = flag

    def set_ir_optimize(self, flag=False):
        self.ir_optimization = flag

    def check_local_bin(self):
        if "SERVING_BIN" in os.environ:
            self.use_local_bin = True
            self.bin_path = os.environ["SERVING_BIN"]

    def check_cuda(self):
        # The GPU serving binary links against libcudart; verify that ldd can
        # resolve it and that an nvidia device node exists under /dev.
        cuda_flag = False
        r = os.popen("ldd {} | grep cudart".format(self.bin_path))
        r = r.read().split("=")
        if len(r) >= 2 and "cudart" in r[1] and os.system(
                "ls /dev/ | grep nvidia > /dev/null") == 0:
            cuda_flag = True
        if not cuda_flag:
            raise SystemExit(
                "CUDA not found, please check your environment or use the cpu version by \"pip install paddle_serving_server\""
            )

    def set_gpuid(self, gpuid=0):
        self.gpuid = gpuid

    def _prepare_engine(self, model_config_paths, device):
        if self.model_toolkit_conf is None:
            self.model_toolkit_conf = server_sdk.ModelToolkitConf()

        for engine_name, model_config_path in model_config_paths.items():
            engine = server_sdk.EngineDesc()
            engine.name = engine_name
            # engine.reloadable_meta = model_config_path + "/fluid_time_file"
            engine.reloadable_meta = self.workdir + "/fluid_time_file"
            os.system("touch {}".format(engine.reloadable_meta))
            engine.reloadable_type = "timestamp_ne"
            engine.runtime_thread_num = 0
            engine.batch_infer_size = 0
            engine.enable_batch_align = 0
            engine.model_data_path = model_config_path
            engine.enable_memory_optimization = self.memory_optimization
            engine.enable_ir_optimization = self.ir_optimization
            engine.static_optimization = False
            engine.force_update_static_cache = False

            if device == "cpu":
                engine.type = "FLUID_CPU_ANALYSIS_DIR"
            elif device == "gpu":
                engine.type = "FLUID_GPU_ANALYSIS_DIR"

            self.model_toolkit_conf.engines.extend([engine])
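        # For reference, a single serialized engine entry in
        # model_toolkit.prototxt looks roughly like this (values are
        # illustrative, taken from the assignments above):
        #   engines {
        #     name: "general_infer_0"
        #     type: "FLUID_GPU_ANALYSIS_DIR"
        #     reloadable_meta: "workdir/fluid_time_file"
        #     model_data_path: "serving_server_model"
        #     enable_memory_optimization: false
        #   }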

    def _prepare_infer_service(self, port):
        if self.infer_service_conf is None:
            self.infer_service_conf = server_sdk.InferServiceConf()
            self.infer_service_conf.port = port
            infer_service = server_sdk.InferService()
            infer_service.name = "GeneralModelService"
            infer_service.workflows.extend(["workflow1"])
            self.infer_service_conf.services.extend([infer_service])

    def _prepare_resource(self, workdir):
        self.workdir = workdir
        if self.resource_conf is None:
            with open("{}/{}".format(workdir, self.general_model_config_fn),
                      "w") as fout:
                fout.write(str(self.model_conf))
            self.resource_conf = server_sdk.ResourceConf()
            for workflow in self.workflow_conf.workflows:
                for node in workflow.nodes:
                    if "dist_kv" in node.name:
                        self.resource_conf.cube_config_path = workdir
                        self.resource_conf.cube_config_file = self.cube_config_fn
            self.resource_conf.model_toolkit_path = workdir
            self.resource_conf.model_toolkit_file = self.model_toolkit_fn
            self.resource_conf.general_model_path = workdir
            self.resource_conf.general_model_file = self.general_model_config_fn

    def _write_pb_str(self, filepath, pb_obj):
        with open(filepath, "w") as fout:
            fout.write(str(pb_obj))

    def load_model_config(self, model_config_paths):
        # At present, Serving needs the model path configured in
        # resource.prototxt to determine the input and output format
        # of the workflow, so the input and output of all models in a
        # workflow must be the same.
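        # Hedged examples of the two accepted forms (paths are placeholders;
        # read_op/infer_op are the node strings returned by OpMaker.create()):
        #   server.load_model_config("serving_server_model")
        #   server.load_model_config({read_op: "model_1", infer_op: "model_2"})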
        workflow_oi_config_path = None
        if isinstance(model_config_paths, str):
            # If there is only one model path, use the default infer_op.
            # Because there are several infer_op type, we need to find
            # it from workflow_conf.
            default_engine_names = [
                'general_infer_0', 'general_dist_kv_infer_0',
                'general_dist_kv_quant_infer_0'
            ]
            engine_name = None
            for node in self.workflow_conf.workflows[0].nodes:
                if node.name in default_engine_names:
                    engine_name = node.name
                    break
            if engine_name is None:
                raise Exception(
                    "You have set the engine_name of the Op. Please use the form {op: model_path} to configure the model path."
                )
            self.model_config_paths = {engine_name: model_config_paths}
            workflow_oi_config_path = self.model_config_paths[engine_name]
        elif isinstance(model_config_paths, dict):
            self.model_config_paths = {}
            for node_str, path in model_config_paths.items():
                node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(node_str, node)
                self.model_config_paths[node.name] = path
            print("You have specified multiple model paths, please ensure "
                  "that the input and output of multiple models are the same.")
            workflow_oi_config_path = list(self.model_config_paths.values())[0]
        else:
            raise Exception("The type of model_config_paths must be str or "
                            "dict({op: model_path}), not {}.".format(
                                type(model_config_paths)))

        self.model_conf = m_config.GeneralModelConfig()
        with open("{}/serving_server_conf.prototxt".format(
                workflow_oi_config_path), 'r') as f:
            self.model_conf = google.protobuf.text_format.Merge(
                str(f.read()), self.model_conf)
        # check config here
        # print config here

    def download_bin(self):
        os.chdir(self.module_path)
        need_download = False
        device_version = "serving-gpu-"
        folder_name = device_version + serving_server_version
        tar_name = folder_name + ".tar.gz"
        bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
        self.server_path = os.path.join(self.module_path, folder_name)

        download_flag = "{}/{}.is_download".format(self.module_path,
                                                   folder_name)

        # Acquire an exclusive flock on version.py so that only one process
        # downloads the serving binary; other processes block here until the
        # download has finished.
        version_file = open("{}/version.py".format(self.module_path), "r")
        fcntl.flock(version_file, fcntl.LOCK_EX)

        if os.path.exists(download_flag):
            os.chdir(self.cur_path)
            self.bin_path = self.server_path + "/serving"
            return

        if not os.path.exists(self.server_path):
            os.system("touch {}/{}.is_download".format(self.module_path,
                                                       folder_name))
            print('First time run, downloading PaddleServing components ...')
            r = os.system('wget ' + bin_url + ' --no-check-certificate')
            if r != 0:
                if os.path.exists(tar_name):
                    os.remove(tar_name)
                raise SystemExit(
                    'Download failed, please check your network or permission of {}.'.
                    format(self.module_path))
            else:
                try:
                    print('Decompressing files ..')
                    tar = tarfile.open(tar_name)
                    tar.extractall()
                    tar.close()
                except:
                    # Clean up the partially extracted folder before exiting.
                    if os.path.exists(self.server_path):
                        os.system("rm -rf {}".format(self.server_path))
                    raise SystemExit(
                        'Decompressing failed, please check your permission of {} or disk space left.'.
                        format(self.module_path))
                finally:
                    os.remove(tar_name)
        # Closing version.py releases the flock acquired above.
        version_file.close()
        os.chdir(self.cur_path)
        self.bin_path = self.server_path + "/serving"

    def prepare_server(self, workdir=None, port=9292, device="cpu"):
        if workdir is None:
            workdir = "./tmp"
        os.system("mkdir {}".format(workdir))
        os.system("touch {}/fluid_time_file".format(workdir))

        if not self.port_is_available(port):
            raise SystemExit("Port {} is already in use".format(port))

        self.set_port(port)
        self._prepare_resource(workdir)
        self._prepare_engine(self.model_config_paths, device)
        self._prepare_infer_service(port)
        self.workdir = workdir

        infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
        workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
        resource_fn = "{}/{}".format(workdir, self.resource_fn)
        model_toolkit_fn = "{}/{}".format(workdir, self.model_toolkit_fn)

        self._write_pb_str(infer_service_fn, self.infer_service_conf)
        self._write_pb_str(workflow_fn, self.workflow_conf)
        self._write_pb_str(resource_fn, self.resource_conf)
        self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf)

    def port_is_available(self, port):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        # connect_ex returns 0 when something is already listening on the port.
        return result != 0

    def run_server(self):
        # just run server with system command
        # currently we do not load cube
        self.check_local_bin()
        if not self.use_local_bin:
            self.download_bin()
            # wait for other process to download server bin
            while not os.path.exists(self.server_path):
                time.sleep(1)
        else:
            print("Use local bin : {}".format(self.bin_path))
        self.check_cuda()
        command = "{} " \
                  "-enable_model_toolkit " \
                  "-inferservice_path {} " \
                  "-inferservice_file {} " \
                  "-max_concurrency {} " \
                  "-num_threads {} " \
                  "-port {} " \
                  "-reload_interval_s {} " \
                  "-resource_path {} " \
                  "-resource_file {} " \
                  "-workflow_path {} " \
                  "-workflow_file {} " \
                  "-bthread_concurrency {} " \
                  "-gpuid {} " \
                  "-max_body_size {} ".format(
                      self.bin_path,
                      self.workdir,
                      self.infer_service_fn,
                      self.max_concurrency,
                      self.num_threads,
                      self.port,
                      self.reload_interval_s,
                      self.workdir,
                      self.resource_fn,
                      self.workdir,
                      self.workflow_fn,
                      self.num_threads,
                      self.gpuid,
                      self.max_body_size)
        print("Going to run command:")
        print(command)

        os.system(command)
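
# An end-to-end usage sketch (hedged: mirrors how serve_args() and the
# classes above are typically wired together; the model path, port and
# gpu id are placeholders):
#
#   op_maker = OpMaker()
#   op_seq_maker = OpSeqMaker()
#   op_seq_maker.add_op(op_maker.create('general_reader'))
#   op_seq_maker.add_op(op_maker.create('general_infer'))
#   op_seq_maker.add_op(op_maker.create('general_response'))
#
#   server = Server()
#   server.set_op_sequence(op_seq_maker.get_op_sequence())
#   server.load_model_config('serving_server_model')
#   server.prepare_server(workdir='workdir', port=9292, device='gpu')
#   server.set_gpuid(0)
#   server.run_server()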