#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import tarfile
import socket
import paddle_serving_server_gpu as paddle_serving_server
import time
from .version import serving_server_version
from contextlib import closing
import argparse
import collections
import fcntl

import numpy as np
import grpc
from .proto import multi_lang_general_model_service_pb2
from .proto import multi_lang_general_model_service_pb2_grpc
from multiprocessing import Pool, Process
from concurrent import futures


def serve_args():
    parser = argparse.ArgumentParser("serve")
    parser.add_argument(
        "--thread", type=int, default=10, help="Concurrency of server")
    parser.add_argument(
        "--model", type=str, default="", help="Model for serving")
    parser.add_argument(
        "--port", type=int, default=9292, help="Port of the server")
    parser.add_argument(
        "--workdir",
        type=str,
        default="workdir",
        help="Working dir of current service")
    parser.add_argument(
        "--device", type=str, default="gpu", help="Type of device")
    parser.add_argument("--gpu_ids", type=str, default="", help="gpu ids")
    parser.add_argument(
        "--name", type=str, default="None", help="Default service name")
    # NOTE: argparse's type=bool treats any non-empty string as True, so
    # these two options are plain store_true flags instead.
    parser.add_argument(
        "--mem_optim",
        default=False,
        action="store_true",
        help="Memory optimize")
    parser.add_argument(
        "--ir_optim",
        default=False,
        action="store_true",
        help="Graph optimize")
    parser.add_argument(
        "--max_body_size",
        type=int,
        default=512 * 1024 * 1024,
        help="Limit sizes of messages")
    return parser.parse_args()
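
# Example invocation (hypothetical model directory, via the package's serve
# entry point; flags are the ones defined above):
#
#     python -m paddle_serving_server_gpu.serve \
#         --model uci_housing_model --port 9292 --gpu_ids 0 --mem_optim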


class OpMaker(object):
    def __init__(self):
        self.op_dict = {
            "general_infer": "GeneralInferOp",
            "general_reader": "GeneralReaderOp",
            "general_response": "GeneralResponseOp",
            "general_text_reader": "GeneralTextReaderOp",
            "general_text_response": "GeneralTextResponseOp",
            "general_single_kv": "GeneralSingleKVOp",
            "general_dist_kv_infer": "GeneralDistKVInferOp",
            "general_dist_kv": "GeneralDistKVOp"
        }
        self.node_name_suffix_ = collections.defaultdict(int)

    def create(self, node_type, engine_name=None, inputs=None, outputs=None):
        if node_type not in self.op_dict:
            raise Exception("Op type {} is not supported right now".format(
                node_type))
        node = server_sdk.DAGNode()
        # node.name will be used as the infer engine name
        if engine_name:
            node.name = engine_name
        else:
            node.name = '{}_{}'.format(node_type,
                                       self.node_name_suffix_[node_type])
            self.node_name_suffix_[node_type] += 1

        node.type = self.op_dict[node_type]
        if inputs:
            for dep_node_str in inputs:
                dep_node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(dep_node_str, dep_node)
                dep = server_sdk.DAGNodeDependency()
                dep.name = dep_node.name
                dep.mode = "RO"
                node.dependencies.extend([dep])
        # The return value is used elsewhere as a dict key, and a proto
        # message is mutable and therefore unhashable, so it is serialized
        # to a string here. This has little effect on overall efficiency.
        return google.protobuf.text_format.MessageToString(node)


class OpSeqMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        if len(node.dependencies) > 1:
            raise Exception(
                'Setting more than one predecessor for an op in OpSeqMaker is not allowed.'
            )
        if len(self.workflow.nodes) >= 1:
            if len(node.dependencies) == 0:
                dep = server_sdk.DAGNodeDependency()
                dep.name = self.workflow.nodes[-1].name
                dep.mode = "RO"
                node.dependencies.extend([dep])
            elif len(node.dependencies) == 1:
                if node.dependencies[0].name != self.workflow.nodes[-1].name:
                    raise Exception(
                        'You must add ops in order in OpSeqMaker. The previous op is {}, but the current op depends on {}.'.
                        format(self.workflow.nodes[-1].name, node.dependencies[
                            0].name))
        self.workflow.nodes.extend([node])

    def get_op_sequence(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf
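
# A minimal usage sketch (names are from this module): ops are created as
# text-format DAGNode strings by OpMaker and chained in order by OpSeqMaker,
# each node depending on the previous one.
#
#     op_maker = OpMaker()
#     seq_maker = OpSeqMaker()
#     seq_maker.add_op(op_maker.create('general_reader'))
#     seq_maker.add_op(op_maker.create('general_infer'))
#     seq_maker.add_op(op_maker.create('general_response'))
#     workflow_conf = seq_maker.get_op_sequence()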


class OpGraphMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        # Currently, SDK only supports "Sequence"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        self.workflow.nodes.extend([node])

    def get_op_graph(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf


class Server(object):
    def __init__(self):
        self.server_handle_ = None
        self.infer_service_conf = None
        self.model_toolkit_conf = None
        self.resource_conf = None
        self.memory_optimization = False
        self.ir_optimization = False
        self.model_conf = None
        self.workflow_fn = "workflow.prototxt"
        self.resource_fn = "resource.prototxt"
        self.infer_service_fn = "infer_service.prototxt"
        self.model_toolkit_fn = "model_toolkit.prototxt"
        self.general_model_config_fn = "general_model.prototxt"
        self.cube_config_fn = "cube.conf"
        self.workdir = ""
        self.max_concurrency = 0
        self.num_threads = 4
        self.port = 8080
        self.reload_interval_s = 10
        self.max_body_size = 64 * 1024 * 1024
        self.module_path = os.path.dirname(paddle_serving_server.__file__)
        self.cur_path = os.getcwd()
        self.use_local_bin = False
        self.gpuid = 0
        self.model_config_paths = None  # for multi-model in a workflow

    def set_max_concurrency(self, concurrency):
        self.max_concurrency = concurrency

    def set_num_threads(self, threads):
        self.num_threads = threads

    def set_max_body_size(self, body_size):
        if body_size >= self.max_body_size:
            self.max_body_size = body_size
        else:
            print(
                "max_body_size is less than default value, will use default value in service."
            )

    def set_port(self, port):
        self.port = port

    def set_reload_interval(self, interval):
        self.reload_interval_s = interval

    def set_op_sequence(self, op_seq):
        self.workflow_conf = op_seq

    def set_op_graph(self, op_graph):
        self.workflow_conf = op_graph

    def set_memory_optimize(self, flag=False):
        self.memory_optimization = flag

    def set_ir_optimize(self, flag=False):
        self.ir_optimization = flag

    def check_local_bin(self):
        if "SERVING_BIN" in os.environ:
            self.use_local_bin = True
            self.bin_path = os.environ["SERVING_BIN"]

    def check_cuda(self):
        cuda_flag = False
        r = os.popen("ldd {} | grep cudart".format(self.bin_path))
        r = r.read().split("=")
        if len(r) >= 2 and "cudart" in r[1] and os.system(
                "ls /dev/ | grep nvidia > /dev/null") == 0:
            cuda_flag = True
        if not cuda_flag:
            raise SystemExit(
                "CUDA not found, please check your environment or use cpu version by \"pip install paddle_serving_server\""
            )

    def set_gpuid(self, gpuid=0):
        self.gpuid = gpuid

    def _prepare_engine(self, model_config_paths, device):
        if self.model_toolkit_conf is None:
            self.model_toolkit_conf = server_sdk.ModelToolkitConf()

        for engine_name, model_config_path in model_config_paths.items():
            engine = server_sdk.EngineDesc()
            engine.name = engine_name
            # engine.reloadable_meta = model_config_path + "/fluid_time_file"
            engine.reloadable_meta = self.workdir + "/fluid_time_file"
            os.system("touch {}".format(engine.reloadable_meta))
            engine.reloadable_type = "timestamp_ne"
            engine.runtime_thread_num = 0
            engine.batch_infer_size = 0
            engine.enable_batch_align = 0
            engine.model_data_path = model_config_path
            engine.enable_memory_optimization = self.memory_optimization
            engine.enable_ir_optimization = self.ir_optimization
            engine.static_optimization = False
            engine.force_update_static_cache = False

            if device == "cpu":
                engine.type = "FLUID_CPU_ANALYSIS_DIR"
            elif device == "gpu":
                engine.type = "FLUID_GPU_ANALYSIS_DIR"

            self.model_toolkit_conf.engines.extend([engine])

    def _prepare_infer_service(self, port):
        if self.infer_service_conf is None:
            self.infer_service_conf = server_sdk.InferServiceConf()
            self.infer_service_conf.port = port
            infer_service = server_sdk.InferService()
            infer_service.name = "GeneralModelService"
            infer_service.workflows.extend(["workflow1"])
            self.infer_service_conf.services.extend([infer_service])

    def _prepare_resource(self, workdir):
        self.workdir = workdir
        if self.resource_conf is None:
            with open("{}/{}".format(workdir, self.general_model_config_fn),
                      "w") as fout:
                fout.write(str(self.model_conf))
            self.resource_conf = server_sdk.ResourceConf()
            for workflow in self.workflow_conf.workflows:
                for node in workflow.nodes:
                    if "dist_kv" in node.name:
                        self.resource_conf.cube_config_path = workdir
                        self.resource_conf.cube_config_file = self.cube_config_fn
            self.resource_conf.model_toolkit_path = workdir
            self.resource_conf.model_toolkit_file = self.model_toolkit_fn
            self.resource_conf.general_model_path = workdir
            self.resource_conf.general_model_file = self.general_model_config_fn

    def _write_pb_str(self, filepath, pb_obj):
        with open(filepath, "w") as fout:
            fout.write(str(pb_obj))

    def load_model_config(self, model_config_paths):
        # At present, Serving needs the model path configured in
        # resource.prototxt to determine the input and output format
        # of the workflow, so the inputs and outputs of all models in
        # a workflow must be the same.
        workflow_oi_config_path = None
        if isinstance(model_config_paths, str):
            # If there is only one model path, use the default infer_op.
            # Because there are several infer_op types, we need to find
            # it from workflow_conf.
            default_engine_names = [
                'general_infer_0', 'general_dist_kv_infer_0',
                'general_dist_kv_quant_infer_0'
            ]
            engine_name = None
            for node in self.workflow_conf.workflows[0].nodes:
                if node.name in default_engine_names:
                    engine_name = node.name
                    break
            if engine_name is None:
                raise Exception(
                    "You have set a custom engine_name for an Op. Please use the form {op: model_path} to configure the model paths."
                )
            self.model_config_paths = {engine_name: model_config_paths}
            workflow_oi_config_path = self.model_config_paths[engine_name]
        elif isinstance(model_config_paths, dict):
            self.model_config_paths = {}
            for node_str, path in model_config_paths.items():
                node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(node_str, node)
                self.model_config_paths[node.name] = path
            print("You have specified multiple model paths, please ensure "
                  "that the input and output of multiple models are the same.")
            workflow_oi_config_path = list(self.model_config_paths.values())[
                0]
        else:
            raise Exception("The type of model_config_paths must be str or "
                            "dict({op: model_path}), not {}.".format(
                                type(model_config_paths)))

        self.model_conf = m_config.GeneralModelConfig()
        with open(
                "{}/serving_server_conf.prototxt".format(
                    workflow_oi_config_path), 'r') as f:
            self.model_conf = google.protobuf.text_format.Merge(
                str(f.read()), self.model_conf)
        # check config here
        # print config here
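
        # A hedged example (hypothetical paths): a single str path uses the
        # default infer op found in the workflow, while a dict maps DAGNode
        # strings (as produced by OpMaker.create) to per-op model paths:
        #
        #     server.load_model_config("uci_housing_model")
        #     # or, for multiple models in one workflow:
        #     server.load_model_config({node_str_a: "model_a",
        #                               node_str_b: "model_b"})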

    def download_bin(self):
        os.chdir(self.module_path)
        need_download = False
        device_version = "serving-gpu-"
        folder_name = device_version + serving_server_version
        tar_name = folder_name + ".tar.gz"
        bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
        self.server_path = os.path.join(self.module_path, folder_name)

        download_flag = "{}/{}.is_download".format(self.module_path,
                                                   folder_name)

        # Acquire an exclusive lock on version.py so that concurrent
        # processes do not download the server binary at the same time.
        version_file = open("{}/version.py".format(self.module_path), "r")
        fcntl.flock(version_file, fcntl.LOCK_EX)

        if os.path.exists(download_flag):
            os.chdir(self.cur_path)
            self.bin_path = self.server_path + "/serving"
            return

        if not os.path.exists(self.server_path):
            os.system("touch {}/{}.is_download".format(self.module_path,
                                                       folder_name))
            print('First time run, downloading PaddleServing components ...')
            r = os.system('wget ' + bin_url + ' --no-check-certificate')
            if r != 0:
                if os.path.exists(tar_name):
                    os.remove(tar_name)
                raise SystemExit(
                    'Download failed, please check your network or permission of {}.'.
                    format(self.module_path))
            else:
                try:
                    print('Decompressing files ..')
                    tar = tarfile.open(tar_name)
                    tar.extractall()
                    tar.close()
                except Exception:
                    # remove the partially extracted directory on failure
                    if os.path.exists(self.server_path):
                        os.system("rm -rf {}".format(self.server_path))
                    raise SystemExit(
                        'Decompressing failed, please check your permission of {} or disk space left.'.
                        format(self.module_path))
                finally:
                    os.remove(tar_name)
        # release lock (closing the file drops the flock)
        version_file.close()
        os.chdir(self.cur_path)
        self.bin_path = self.server_path + "/serving"

    def prepare_server(self, workdir=None, port=9292, device="cpu"):
        if workdir is None:
            workdir = "./tmp"
        os.system("mkdir -p {}".format(workdir))
        os.system("touch {}/fluid_time_file".format(workdir))

        if not self.port_is_available(port):
            raise SystemExit("Port {} is already used".format(port))

        self.set_port(port)
        self._prepare_resource(workdir)
        self._prepare_engine(self.model_config_paths, device)
        self._prepare_infer_service(port)
        self.workdir = workdir

        infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
        workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
        resource_fn = "{}/{}".format(workdir, self.resource_fn)
        model_toolkit_fn = "{}/{}".format(workdir, self.model_toolkit_fn)

        self._write_pb_str(infer_service_fn, self.infer_service_conf)
        self._write_pb_str(workflow_fn, self.workflow_conf)
        self._write_pb_str(resource_fn, self.resource_conf)
        self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf)

    def port_is_available(self, port):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        return result != 0

    def run_server(self):
        # just run server with system command
        # currently we do not load cube
        self.check_local_bin()
        if not self.use_local_bin:
            self.download_bin()
            # wait for other process to download server bin
            while not os.path.exists(self.server_path):
                time.sleep(1)
        else:
            print("Use local bin : {}".format(self.bin_path))
        self.check_cuda()
        command = "{} " \
                  "-enable_model_toolkit " \
                  "-inferservice_path {} " \
                  "-inferservice_file {} " \
                  "-max_concurrency {} " \
                  "-num_threads {} " \
                  "-port {} " \
                  "-reload_interval_s {} " \
                  "-resource_path {} " \
                  "-resource_file {} " \
                  "-workflow_path {} " \
                  "-workflow_file {} " \
                  "-bthread_concurrency {} " \
                  "-gpuid {} " \
                  "-max_body_size {} ".format(
                      self.bin_path,
                      self.workdir,
                      self.infer_service_fn,
                      self.max_concurrency,
                      self.num_threads,
                      self.port,
                      self.reload_interval_s,
                      self.workdir,
                      self.resource_fn,
                      self.workdir,
                      self.workflow_fn,
                      self.num_threads,
                      self.gpuid,
                      self.max_body_size)
        print("Going to Run Command")
        print(command)

        os.system(command)
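
# A minimal end-to-end sketch (assuming a model directory exported by
# paddle_serving_client.io.save_model; paths and port are illustrative):
#
#     op_maker = OpMaker()
#     op_seq_maker = OpSeqMaker()
#     op_seq_maker.add_op(op_maker.create('general_reader'))
#     op_seq_maker.add_op(op_maker.create('general_infer'))
#     op_seq_maker.add_op(op_maker.create('general_response'))
#
#     server = Server()
#     server.set_op_sequence(op_seq_maker.get_op_sequence())
#     server.load_model_config("uci_housing_model")
#     server.set_gpuid(0)
#     server.prepare_server(workdir="workdir", port=9292, device="gpu")
#     server.run_server()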


class MultiLangServerService(
        multi_lang_general_model_service_pb2_grpc.MultiLangGeneralModelService):
    def __init__(self, model_config_path, endpoints):
        from paddle_serving_client import Client
        self._parse_model_config(model_config_path)
        self.bclient_ = Client()
        self.bclient_.load_client_config(
            "{}/serving_server_conf.prototxt".format(model_config_path))
        self.bclient_.connect(endpoints)

    def _parse_model_config(self, model_config_path):
        model_conf = m_config.GeneralModelConfig()
        with open("{}/serving_server_conf.prototxt".format(model_config_path),
                  'r') as f:
            model_conf = google.protobuf.text_format.Merge(
                str(f.read()), model_conf)
        self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
        self.feed_types_ = {}
        self.feed_shapes_ = {}
        self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
        self.fetch_types_ = {}
        self.lod_tensor_set_ = set()
        for i, var in enumerate(model_conf.feed_var):
            self.feed_types_[var.alias_name] = var.feed_type
            self.feed_shapes_[var.alias_name] = var.shape
            if var.is_lod_tensor:
                self.lod_tensor_set_.add(var.alias_name)
        for i, var in enumerate(model_conf.fetch_var):
            self.fetch_types_[var.alias_name] = var.fetch_type
            if var.is_lod_tensor:
                self.lod_tensor_set_.add(var.alias_name)

    def _flatten_list(self, nested_list):
        for item in nested_list:
            if isinstance(item, (list, tuple)):
                for sub_item in self._flatten_list(item):
                    yield sub_item
            else:
                yield item

    def _unpack_request(self, request):
        feed_names = list(request.feed_var_names)
        fetch_names = list(request.fetch_var_names)
        is_python = request.is_python
        feed_batch = []
        for feed_inst in request.insts:
            feed_dict = {}
            for idx, name in enumerate(feed_names):
                var = feed_inst.tensor_array[idx]
                v_type = self.feed_types_[name]
                data = None
                if is_python:
                    if v_type == 0:
                        data = np.frombuffer(var.data, dtype="int64")
                    elif v_type == 1:
                        data = np.frombuffer(var.data, dtype="float32")
                    else:
                        raise Exception("unsupported feed type {}".format(
                            v_type))
                else:
                    if v_type == 0:  # int64
                        data = np.array(list(var.int64_data), dtype="int64")
                    elif v_type == 1:  # float32
                        data = np.array(list(var.float_data), dtype="float32")
                    else:
                        raise Exception("unsupported feed type {}".format(
                            v_type))
                data.shape = list(feed_inst.tensor_array[idx].shape)
                feed_dict[name] = data
            feed_batch.append(feed_dict)
        return feed_batch, fetch_names, is_python

    def _pack_resp_package(self, result, fetch_names, is_python, tag):
        resp = multi_lang_general_model_service_pb2.Response()
        # Only one model is supported temporarily
        model_output = multi_lang_general_model_service_pb2.ModelOutput()
        inst = multi_lang_general_model_service_pb2.FetchInst()
        for idx, name in enumerate(fetch_names):
            tensor = multi_lang_general_model_service_pb2.Tensor()
            v_type = self.fetch_types_[name]
            if is_python:
                tensor.data = result[name].tobytes()
            else:
                if v_type == 0:  # int64
                    tensor.int64_data.extend(result[name].reshape(-1).tolist())
                elif v_type == 1:  # float32
                    tensor.float_data.extend(result[name].reshape(-1).tolist())
                else:
                    raise Exception("unsupported fetch type {}".format(v_type))
            tensor.shape.extend(list(result[name].shape))
            if name in self.lod_tensor_set_:
                tensor.lod.extend(result["{}.lod".format(name)].tolist())
            inst.tensor_array.append(tensor)
        model_output.insts.append(inst)
        resp.outputs.append(model_output)
        resp.tag = tag
        return resp

    def inference(self, request, context):
        feed_dict, fetch_names, is_python = self._unpack_request(request)
        data, tag = self.bclient_.predict(
            feed=feed_dict, fetch=fetch_names, need_variant_tag=True)
        return self._pack_resp_package(data, fetch_names, is_python, tag)


class MultiLangServer(object):
    def __init__(self, worker_num=2):
        self.bserver_ = Server()
        self.worker_num_ = worker_num

    def set_op_sequence(self, op_seq):
        self.bserver_.set_op_sequence(op_seq)

    def load_model_config(self, model_config_path):
        if not isinstance(model_config_path, str):
            raise Exception(
                "MultiLangServer only supports single-model temporarily")
        self.bserver_.load_model_config(model_config_path)
        self.model_config_path_ = model_config_path

    def prepare_server(self, workdir=None, port=9292, device="cpu"):
        default_port = 12000
        self.port_list_ = []
        for i in range(1000):
            if default_port + i != port and self._port_is_available(default_port
                                                                    + i):
                self.port_list_.append(default_port + i)
                break
        self.bserver_.prepare_server(
            workdir=workdir, port=self.port_list_[0], device=device)
        self.gport_ = port

    def _launch_brpc_service(self, bserver):
        bserver.run_server()

    def _port_is_available(self, port):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        return result != 0

    def run_server(self):
        p_bserver = Process(
            target=self._launch_brpc_service, args=(self.bserver_, ))
        p_bserver.start()
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=self.worker_num_))
        multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
            MultiLangServerService(self.model_config_path_,
                                   ["0.0.0.0:{}".format(self.port_list_[0])]),
            server)
        server.add_insecure_port('[::]:{}'.format(self.gport_))
        server.start()
        p_bserver.join()
        server.wait_for_termination()
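
# A hedged usage sketch for the gRPC front end (port and model path are
# illustrative): MultiLangServer runs the brpc Server on an internal port
# and exposes the multi-language gRPC service on the given port.
#
#     multi_lang_server = MultiLangServer()
#     multi_lang_server.set_op_sequence(op_seq_maker.get_op_sequence())
#     multi_lang_server.load_model_config("uci_housing_model")
#     multi_lang_server.prepare_server(workdir="workdir", port=9393)
#     multi_lang_server.run_server()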