#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import os
from .proto import server_configure_pb2 as server_sdk
from .proto import general_model_config_pb2 as m_config
import google.protobuf.text_format
import tarfile
import socket
import paddle_serving_server
from .version import serving_server_version
from contextlib import closing
import collections
import shutil
import numpy as np
import grpc
from .proto import multi_lang_general_model_service_pb2
import sys
if not sys.platform.startswith('win'):
    import fcntl
sys.path.append(
    os.path.join(os.path.abspath(os.path.dirname(__file__)), 'proto'))
from .proto import multi_lang_general_model_service_pb2_grpc
from multiprocessing import Pool, Process
from concurrent import futures


class OpMaker(object):
    def __init__(self):
        self.op_dict = {
            "general_infer": "GeneralInferOp",
            "general_reader": "GeneralReaderOp",
            "general_response": "GeneralResponseOp",
            "general_text_reader": "GeneralTextReaderOp",
            "general_text_response": "GeneralTextResponseOp",
            "general_single_kv": "GeneralSingleKVOp",
            "general_dist_kv_infer": "GeneralDistKVInferOp",
            "general_dist_kv_quant_infer": "GeneralDistKVQuantInferOp",
            "general_copy": "GeneralCopyOp"
        }
        self.node_name_suffix_ = collections.defaultdict(int)

    def create(self, node_type, engine_name=None, inputs=None, outputs=None):
        if node_type not in self.op_dict:
            raise Exception("Op type {} is not supported right now".format(
                node_type))
        node = server_sdk.DAGNode()
        # node.name will be used as the infer engine name
        if engine_name:
            node.name = engine_name
        else:
            node.name = '{}_{}'.format(node_type,
                                       self.node_name_suffix_[node_type])
            self.node_name_suffix_[node_type] += 1

        node.type = self.op_dict[node_type]
        if inputs:
            for dep_node_str in inputs:
                dep_node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(dep_node_str, dep_node)
                dep = server_sdk.DAGNodeDependency()
                dep.name = dep_node.name
                dep.mode = "RO"
                node.dependencies.extend([dep])
        # The return value is used elsewhere as a dict key, and a proto
        # message is mutable and therefore unhashable, so the node is
        # serialized to a string here. This has little effect on overall
        # efficiency.
        return google.protobuf.text_format.MessageToString(node)

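# Example use of OpMaker above (a minimal sketch): each create() call returns
# the node serialized as a prototxt string, and repeated node types receive
# numeric suffixes (general_infer_0, general_infer_1, ...):
#
#     op_maker = OpMaker()
#     read_op = op_maker.create('general_reader')       # general_reader_0
#     infer_op = op_maker.create('general_infer')       # general_infer_0
#     response_op = op_maker.create('general_response')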

class OpSeqMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        if len(node.dependencies) > 1:
            raise Exception(
                'Setting more than one predecessor for an op is not allowed in OpSeqMaker.'
            )
        if len(self.workflow.nodes) >= 1:
            if len(node.dependencies) == 0:
                dep = server_sdk.DAGNodeDependency()
                dep.name = self.workflow.nodes[-1].name
                dep.mode = "RO"
                node.dependencies.extend([dep])
            elif len(node.dependencies) == 1:
                if node.dependencies[0].name != self.workflow.nodes[-1].name:
                    raise Exception(
                        'Ops must be added in order in OpSeqMaker. The current '
                        'op depends on {}, but the previous op is {}.'.format(
                            node.dependencies[0].name,
                            self.workflow.nodes[-1].name))
        self.workflow.nodes.extend([node])

    def get_op_sequence(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf

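# Example use of OpSeqMaker above (a minimal sketch): ops added without
# explicit inputs are chained to the previously added op automatically:
#
#     op_maker = OpMaker()
#     op_seq_maker = OpSeqMaker()
#     op_seq_maker.add_op(op_maker.create('general_reader'))
#     op_seq_maker.add_op(op_maker.create('general_infer'))
#     op_seq_maker.add_op(op_maker.create('general_response'))
#     workflow_conf = op_seq_maker.get_op_sequence()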

class OpGraphMaker(object):
    def __init__(self):
        self.workflow = server_sdk.Workflow()
        self.workflow.name = "workflow1"
        # Currently, SDK only supports "Sequence"
        self.workflow.workflow_type = "Sequence"

    def add_op(self, node_str):
        node = server_sdk.DAGNode()
        google.protobuf.text_format.Parse(node_str, node)
        self.workflow.nodes.extend([node])

    def get_op_graph(self):
        workflow_conf = server_sdk.WorkflowConf()
        workflow_conf.workflows.extend([self.workflow])
        return workflow_conf

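# Example use of OpGraphMaker above (a sketch): unlike OpSeqMaker,
# dependencies are wired explicitly through the `inputs` argument of
# OpMaker.create, so non-linear graphs can be described:
#
#     op_maker = OpMaker()
#     read_op = op_maker.create('general_reader')
#     infer_op = op_maker.create('general_infer', inputs=[read_op])
#     response_op = op_maker.create('general_response', inputs=[infer_op])
#     op_graph_maker = OpGraphMaker()
#     for op in [read_op, infer_op, response_op]:
#         op_graph_maker.add_op(op)
#     workflow_conf = op_graph_maker.get_op_graph()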

class Server(object):
    def __init__(self):
        self.server_handle_ = None
        self.infer_service_conf = None
        self.model_toolkit_conf = None
        self.resource_conf = None
        self.memory_optimization = False
        self.ir_optimization = False
        self.model_conf = None
        self.workflow_fn = "workflow.prototxt"
        self.resource_fn = "resource.prototxt"
        self.infer_service_fn = "infer_service.prototxt"
        self.model_toolkit_fn = "model_toolkit.prototxt"
        self.general_model_config_fn = "general_model.prototxt"
        self.cube_config_fn = "cube.conf"
        self.workdir = ""
        self.max_concurrency = 0
        self.num_threads = 4
        self.port = 8080
        self.reload_interval_s = 10
        self.max_body_size = 64 * 1024 * 1024
        self.module_path = os.path.dirname(paddle_serving_server.__file__)
        self.cur_path = os.getcwd()
        self.use_local_bin = False
        self.mkl_flag = False
        self.encryption_model = False
        self.product_name = None
        self.container_id = None
        self.model_config_paths = None  # for multi-model in a workflow

    def get_fetch_list(self):
        fetch_names = [var.alias_name for var in self.model_conf.fetch_var]
        return fetch_names

    def set_max_concurrency(self, concurrency):
        self.max_concurrency = concurrency

    def set_num_threads(self, threads):
        self.num_threads = threads

    def set_max_body_size(self, body_size):
        if body_size >= self.max_body_size:
            self.max_body_size = body_size
        else:
            print(
                "max_body_size is less than default value, will use default value in service."
            )

    def set_port(self, port):
        self.port = port

    def set_reload_interval(self, interval):
        self.reload_interval_s = interval

    def set_op_sequence(self, op_seq):
        self.workflow_conf = op_seq

    def set_op_graph(self, op_graph):
        self.workflow_conf = op_graph

    def set_memory_optimize(self, flag=False):
        self.memory_optimization = flag

    def set_ir_optimize(self, flag=False):
        self.ir_optimization = flag

    def use_encryption_model(self, flag=False):
        self.encryption_model = flag

    def set_product_name(self, product_name=None):
        if product_name is None:
            raise ValueError("product_name can't be None.")
        self.product_name = product_name

    def set_container_id(self, container_id):
        if container_id is None:
            raise ValueError("container_id can't be None.")
        self.container_id = container_id

    def check_local_bin(self):
        if "SERVING_BIN" in os.environ:
            self.use_local_bin = True
            self.bin_path = os.environ["SERVING_BIN"]

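    # check_local_bin above allows running a locally built serving binary
    # instead of the downloaded one; set SERVING_BIN before run_server(),
    # e.g. (path illustrative):
    #
    #     os.environ["SERVING_BIN"] = "/path/to/Serving/build/core/general-server/serving"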
    def _prepare_engine(self, model_config_paths, device):
        if self.model_toolkit_conf is None:
            self.model_toolkit_conf = server_sdk.ModelToolkitConf()

        for engine_name, model_config_path in model_config_paths.items():
            engine = server_sdk.EngineDesc()
            engine.name = engine_name
            engine.reloadable_meta = model_config_path + "/fluid_time_file"
            os.system("touch {}".format(engine.reloadable_meta))
            engine.reloadable_type = "timestamp_ne"
            engine.runtime_thread_num = 0
            engine.batch_infer_size = 0
            engine.enable_batch_align = 0
            engine.model_data_path = model_config_path
            engine.enable_memory_optimization = self.memory_optimization
            engine.enable_ir_optimization = self.ir_optimization
            engine.static_optimization = False
            engine.force_update_static_cache = False
            if os.path.exists('{}/__params__'.format(model_config_path)):
                suffix = ""
            else:
                suffix = "_DIR"

            if device == "cpu":
                if self.encryption_model:
                    engine.type = "FLUID_CPU_ANALYSIS_ENCRYPT"
                else:
                    engine.type = "FLUID_CPU_ANALYSIS" + suffix
            elif device == "gpu":
                if self.encryption_model:
                    engine.type = "FLUID_GPU_ANALYSIS_ENCRYPT"
                else:
                    engine.type = "FLUID_GPU_ANALYSIS" + suffix

            self.model_toolkit_conf.engines.extend([engine])

    def _prepare_infer_service(self, port):
        if self.infer_service_conf is None:
            self.infer_service_conf = server_sdk.InferServiceConf()
            self.infer_service_conf.port = port
            infer_service = server_sdk.InferService()
            infer_service.name = "GeneralModelService"
            infer_service.workflows.extend(["workflow1"])
            self.infer_service_conf.services.extend([infer_service])

    def _prepare_resource(self, workdir, cube_conf):
        self.workdir = workdir
        if self.resource_conf is None:
            with open("{}/{}".format(workdir, self.general_model_config_fn),
                      "w") as fout:
                fout.write(str(self.model_conf))
            self.resource_conf = server_sdk.ResourceConf()
            for workflow in self.workflow_conf.workflows:
                for node in workflow.nodes:
                    if "dist_kv" in node.name:
                        self.resource_conf.cube_config_path = workdir
                        self.resource_conf.cube_config_file = self.cube_config_fn
                        if cube_conf is None:
                            raise ValueError(
                                "Please set the path of cube.conf when using a dist_kv op."
                            )
                        shutil.copy(cube_conf, workdir)
                        if "quant" in node.name:
                            self.resource_conf.cube_quant_bits = 8
            self.resource_conf.model_toolkit_path = workdir
            self.resource_conf.model_toolkit_file = self.model_toolkit_fn
            self.resource_conf.general_model_path = workdir
            self.resource_conf.general_model_file = self.general_model_config_fn
            if self.product_name is not None:
                self.resource_conf.auth_product_name = self.product_name
            if self.container_id is not None:
                self.resource_conf.auth_container_id = self.container_id

    def _write_pb_str(self, filepath, pb_obj):
        with open(filepath, "w") as fout:
            fout.write(str(pb_obj))

    def load_model_config(self, model_config_paths):
        # At present, Serving needs the model path configured in the
        # resource.prototxt file to determine the input and output format
        # of the workflow, so the inputs and outputs of all models in a
        # workflow must be the same.
        workflow_oi_config_path = None
        if isinstance(model_config_paths, str):
            # If there is only one model path, use the default infer_op.
            # Because there are several infer_op types, we need to find
            # it from workflow_conf.
            default_engine_names = [
                'general_infer_0', 'general_dist_kv_infer_0',
                'general_dist_kv_quant_infer_0'
            ]
            engine_name = None
            for node in self.workflow_conf.workflows[0].nodes:
                if node.name in default_engine_names:
                    engine_name = node.name
                    break
            if engine_name is None:
                raise Exception(
                    "You have set the engine_name of Op. Please use the form {op: model_path} to configure model path"
                )
            self.model_config_paths = {engine_name: model_config_paths}
            workflow_oi_config_path = self.model_config_paths[engine_name]
        elif isinstance(model_config_paths, dict):
            self.model_config_paths = {}
            for node_str, path in model_config_paths.items():
                node = server_sdk.DAGNode()
                google.protobuf.text_format.Parse(node_str, node)
                self.model_config_paths[node.name] = path
            print("You have specified multiple model paths, please ensure "
                  "that the input and output of multiple models are the same.")
            workflow_oi_config_path = list(
                self.model_config_paths.items())[0][1]
        else:
            raise Exception("The type of model_config_paths must be str or "
                            "dict({op: model_path}), not {}.".format(
                                type(model_config_paths)))

        self.model_conf = m_config.GeneralModelConfig()
        with open("{}/serving_server_conf.prototxt".format(
                workflow_oi_config_path), 'r') as f:
            self.model_conf = google.protobuf.text_format.Merge(
                str(f.read()), self.model_conf)
        # check config here
        # print config here

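    # Example calls for load_model_config above (a sketch, model path
    # illustrative): a plain string configures the single default infer op,
    # while a dict maps ops (as returned by OpMaker.create) to model paths
    # for multi-model workflows:
    #
    #     server.load_model_config("uci_housing_model")
    #     server.load_model_config({infer_op: "uci_housing_model"})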
    def use_mkl(self, flag):
        self.mkl_flag = flag

    def get_device_version(self):
        avx_flag = False
        mkl_flag = self.mkl_flag
        openblas_flag = False
        r = os.system("cat /proc/cpuinfo | grep avx > /dev/null 2>&1")
        if r == 0:
            avx_flag = True
        if avx_flag:
            if mkl_flag:
                device_version = "serving-cpu-avx-mkl-"
            else:
                device_version = "serving-cpu-avx-openblas-"
        else:
            if mkl_flag:
                print(
                    "Your CPU does not support AVX; the server will run in noavx-openblas mode."
                )
            device_version = "serving-cpu-noavx-openblas-"
        return device_version

    def download_bin(self):
        os.chdir(self.module_path)
        need_download = False
        device_version = self.get_device_version()
        folder_name = device_version + serving_server_version
        tar_name = folder_name + ".tar.gz"
        bin_url = "https://paddle-serving.bj.bcebos.com/bin/" + tar_name
        self.server_path = os.path.join(self.module_path, folder_name)

        # acquire a file lock so concurrent processes do not download twice
        version_file = open("{}/version.py".format(self.module_path), "r")
        fcntl.flock(version_file, fcntl.LOCK_EX)

        if not os.path.exists(self.server_path):
            print('First-time run, downloading PaddleServing components ...')
            r = os.system('wget ' + bin_url + ' --no-check-certificate')
            if r != 0:
                if os.path.exists(tar_name):
                    os.remove(tar_name)
                raise SystemExit(
                    'Download failed, please check your network or permission of {}.'
                    .format(self.module_path))
            else:
                try:
                    print('Decompressing files ..')
                    tar = tarfile.open(tar_name)
                    tar.extractall()
                    tar.close()
                except Exception:
                    # clean up the partially extracted folder
                    if os.path.exists(self.server_path):
                        shutil.rmtree(self.server_path)
                    raise SystemExit(
                        'Decompressing failed, please check your permission of {} or disk space left.'
                        .format(self.module_path))
                finally:
                    os.remove(tar_name)
        # release lock
        version_file.close()
        os.chdir(self.cur_path)
        self.bin_path = self.server_path + "/serving"

    def prepare_server(self,
                       workdir=None,
                       port=9292,
                       device="cpu",
                       cube_conf=None):
        if workdir is None:
            workdir = "./tmp"
        os.system("mkdir {}".format(workdir))
        os.system("touch {}/fluid_time_file".format(workdir))

        if not self.port_is_available(port):
            raise SystemExit("Port {} is already used".format(port))
        self.set_port(port)
        self._prepare_resource(workdir, cube_conf)
        self._prepare_engine(self.model_config_paths, device)
        self._prepare_infer_service(port)
        self.workdir = workdir

        infer_service_fn = "{}/{}".format(workdir, self.infer_service_fn)
        workflow_fn = "{}/{}".format(workdir, self.workflow_fn)
        resource_fn = "{}/{}".format(workdir, self.resource_fn)
        model_toolkit_fn = "{}/{}".format(workdir, self.model_toolkit_fn)

        self._write_pb_str(infer_service_fn, self.infer_service_conf)
        self._write_pb_str(workflow_fn, self.workflow_conf)
        self._write_pb_str(resource_fn, self.resource_conf)
        self._write_pb_str(model_toolkit_fn, self.model_toolkit_conf)

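    # End-to-end sketch for the Server class (model path illustrative, see
    # the OpSeqMaker example above for op_seq_maker):
    #
    #     server = Server()
    #     server.set_op_sequence(op_seq_maker.get_op_sequence())
    #     server.load_model_config("uci_housing_model")
    #     server.prepare_server(workdir="workdir", port=9393, device="cpu")
    #     server.run_server()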
    def port_is_available(self, port):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        return result != 0

    def run_server(self):
        # just run server with system command
        # currently we do not load cube
        self.check_local_bin()
        if not self.use_local_bin:
            self.download_bin()
        else:
            print("Using local bin: {}".format(self.bin_path))
        command = "{} " \
                  "-enable_model_toolkit " \
                  "-inferservice_path {} " \
                  "-inferservice_file {} " \
                  "-max_concurrency {} " \
                  "-num_threads {} " \
                  "-port {} " \
                  "-reload_interval_s {} " \
                  "-resource_path {} " \
                  "-resource_file {} " \
                  "-workflow_path {} " \
                  "-workflow_file {} " \
                  "-bthread_concurrency {} " \
                  "-max_body_size {} ".format(
                      self.bin_path,
                      self.workdir,
                      self.infer_service_fn,
                      self.max_concurrency,
                      self.num_threads,
                      self.port,
                      self.reload_interval_s,
                      self.workdir,
                      self.resource_fn,
                      self.workdir,
                      self.workflow_fn,
                      self.num_threads,
                      self.max_body_size)
        print("Going to Run Command")
        print(command)
        os.system(command)


class MultiLangServerServiceServicer(multi_lang_general_model_service_pb2_grpc.
                                     MultiLangGeneralModelServiceServicer):
    def __init__(self, model_config_path, is_multi_model, endpoints):
        self.is_multi_model_ = is_multi_model
        self.model_config_path_ = model_config_path
        self.endpoints_ = endpoints
        with open(self.model_config_path_) as f:
            self.model_config_str_ = str(f.read())
        self._parse_model_config(self.model_config_str_)
        self._init_bclient(self.model_config_path_, self.endpoints_)

    def _init_bclient(self, model_config_path, endpoints, timeout_ms=None):
        from paddle_serving_client import Client
        self.bclient_ = Client()
        if timeout_ms is not None:
            self.bclient_.set_rpc_timeout_ms(timeout_ms)
        self.bclient_.load_client_config(model_config_path)
        self.bclient_.connect(endpoints)

    def _parse_model_config(self, model_config_str):
        model_conf = m_config.GeneralModelConfig()
        model_conf = google.protobuf.text_format.Merge(model_config_str,
                                                       model_conf)
        self.feed_names_ = [var.alias_name for var in model_conf.feed_var]
        self.feed_types_ = {}
        self.feed_shapes_ = {}
        self.fetch_names_ = [var.alias_name for var in model_conf.fetch_var]
        self.fetch_types_ = {}
        self.lod_tensor_set_ = set()
        for i, var in enumerate(model_conf.feed_var):
            self.feed_types_[var.alias_name] = var.feed_type
            self.feed_shapes_[var.alias_name] = var.shape
            if var.is_lod_tensor:
                self.lod_tensor_set_.add(var.alias_name)
        for i, var in enumerate(model_conf.fetch_var):
            self.fetch_types_[var.alias_name] = var.fetch_type
            if var.is_lod_tensor:
                self.lod_tensor_set_.add(var.alias_name)

    def _flatten_list(self, nested_list):
        for item in nested_list:
            if isinstance(item, (list, tuple)):
                for sub_item in self._flatten_list(item):
                    yield sub_item
            else:
                yield item

    def _unpack_inference_request(self, request):
        feed_names = list(request.feed_var_names)
        fetch_names = list(request.fetch_var_names)
        is_python = request.is_python
        log_id = request.log_id
        feed_batch = []
        for feed_inst in request.insts:
            feed_dict = {}
            for idx, name in enumerate(feed_names):
                var = feed_inst.tensor_array[idx]
                v_type = self.feed_types_[name]
                data = None
                if is_python:
                    if v_type == 0:  # int64
                        data = np.frombuffer(var.data, dtype="int64")
                    elif v_type == 1:  # float32
                        data = np.frombuffer(var.data, dtype="float32")
                    elif v_type == 2:  # int32
                        data = np.frombuffer(var.data, dtype="int32")
                    else:
                        raise Exception("error type.")
B
barrierye 已提交
556
                else:
557 558 559 560 561 562 563 564 565 566 567 568 569 570
                    if v_type == 0:  # int64
                        data = np.array(list(var.int64_data), dtype="int64")
                    elif v_type == 1:  # float32
                        data = np.array(list(var.float_data), dtype="float32")
                    elif v_type == 2:  # int32
                        data = np.array(list(var.int_data), dtype="int32")
                    else:
                        raise Exception("error type.")
                data.shape = list(feed_inst.tensor_array[idx].shape)
                feed_dict[name] = data
                if len(var.lod) > 0:
                    feed_dict["{}.lod".format(name)] = var.lod
            feed_batch.append(feed_dict)
        return feed_batch, fetch_names, is_python, log_id

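    # Sketch of the request layout _unpack_inference_request above expects
    # (client side; feed var "x", fetch var "price", and the shape are
    # illustrative): with is_python set, tensor data travels as raw bytes,
    # otherwise the typed repeated fields (int64_data, float_data, int_data)
    # are used.
    #
    #     req = multi_lang_general_model_service_pb2.InferenceRequest()
    #     req.feed_var_names.append("x")
    #     req.fetch_var_names.append("price")
    #     req.is_python = True
    #     inst = req.insts.add()
    #     tensor = inst.tensor_array.add()
    #     x = np.array([[0.1] * 13], dtype="float32")
    #     tensor.data = x.tobytes()
    #     tensor.shape.extend(list(x.shape))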
    def _pack_inference_response(self, ret, fetch_names, is_python):
        resp = multi_lang_general_model_service_pb2.InferenceResponse()
        if ret is None:
            resp.err_code = 1
            return resp
        results, tag = ret
        resp.tag = tag
        resp.err_code = 0
        if not self.is_multi_model_:
            results = {'general_infer_0': results}
        for model_name, model_result in results.items():
            model_output = multi_lang_general_model_service_pb2.ModelOutput()
            inst = multi_lang_general_model_service_pb2.FetchInst()
            for idx, name in enumerate(fetch_names):
                tensor = multi_lang_general_model_service_pb2.Tensor()
                v_type = self.fetch_types_[name]
                if is_python:
                    tensor.data = model_result[name].tobytes()
                else:
                    if v_type == 0:  # int64
                        tensor.int64_data.extend(model_result[name].reshape(-1)
                                                 .tolist())
                    elif v_type == 1:  # float32
                        tensor.float_data.extend(model_result[name].reshape(-1)
                                                 .tolist())
                    elif v_type == 2:  # int32
                        tensor.int_data.extend(model_result[name].reshape(-1)
                                               .tolist())
                    else:
                        raise Exception("error type.")
                tensor.shape.extend(list(model_result[name].shape))
                if "{}.lod".format(name) in model_result:
                    tensor.lod.extend(model_result["{}.lod".format(name)]
                                      .tolist())
                inst.tensor_array.append(tensor)
            model_output.insts.append(inst)
            model_output.engine_name = model_name
            resp.outputs.append(model_output)
        return resp

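    # Sketch of reading the InferenceResponse built above (client side,
    # is_python mode; fetch var dtype illustrative):
    #
    #     output = resp.outputs[0]               # one ModelOutput per engine
    #     tensor = output.insts[0].tensor_array[0]
    #     result = np.frombuffer(tensor.data, dtype="float32")
    #     result.shape = list(tensor.shape)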
    def SetTimeout(self, request, context):
        # This process and the Inference process must not run at the same time.
        # For performance reasons, no thread lock is added for now.
        timeout_ms = request.timeout_ms
        self._init_bclient(self.model_config_path_, self.endpoints_, timeout_ms)
        resp = multi_lang_general_model_service_pb2.SimpleResponse()
        resp.err_code = 0
        return resp

    def Inference(self, request, context):
        feed_batch, fetch_names, is_python, log_id = \
                self._unpack_inference_request(request)
        ret = self.bclient_.predict(
            feed=feed_batch,
            fetch=fetch_names,
            batch=True,
            need_variant_tag=True,
            log_id=log_id)
        return self._pack_inference_response(ret, fetch_names, is_python)

    def GetClientConfig(self, request, context):
        resp = multi_lang_general_model_service_pb2.GetClientConfigResponse()
        resp.client_config_str = self.model_config_str_
        return resp


class MultiLangServer(object):
    def __init__(self):
        self.bserver_ = Server()
        self.worker_num_ = 4
        self.body_size_ = 64 * 1024 * 1024
        self.concurrency_ = 100000
        self.is_multi_model_ = False  # for model ensemble

    def set_max_concurrency(self, concurrency):
        self.concurrency_ = concurrency
        self.bserver_.set_max_concurrency(concurrency)

    def set_num_threads(self, threads):
        self.worker_num_ = threads
        self.bserver_.set_num_threads(threads)

    def set_max_body_size(self, body_size):
        self.bserver_.set_max_body_size(body_size)
        if body_size >= self.body_size_:
            self.body_size_ = body_size
        else:
            print(
                "max_body_size is less than default value, will use default value in service."
            )

    def use_encryption_model(self, flag=False):
        self.encryption_model = flag

    def set_port(self, port):
        self.gport_ = port

    def set_reload_interval(self, interval):
        self.bserver_.set_reload_interval(interval)

    def set_op_sequence(self, op_seq):
        self.bserver_.set_op_sequence(op_seq)

    def set_op_graph(self, op_graph):
        self.bserver_.set_op_graph(op_graph)

    def set_memory_optimize(self, flag=False):
        self.bserver_.set_memory_optimize(flag)

    def set_ir_optimize(self, flag=False):
        self.bserver_.set_ir_optimize(flag)

    def use_mkl(self, flag):
        self.bserver_.use_mkl(flag)

    def load_model_config(self, server_config_paths, client_config_path=None):
        self.bserver_.load_model_config(server_config_paths)
        if client_config_path is None:
            if isinstance(server_config_paths, dict):
                self.is_multi_model_ = True
                client_config_path = '{}/serving_server_conf.prototxt'.format(
                    list(server_config_paths.items())[0][1])
            else:
                client_config_path = '{}/serving_server_conf.prototxt'.format(
                    server_config_paths)
        self.bclient_config_path_ = client_config_path

    def prepare_server(self,
                       workdir=None,
                       port=9292,
                       device="cpu",
                       cube_conf=None):
        if not self._port_is_available(port):
            raise SystemExit("Port {} is already in use".format(port))
        default_port = 12000
        self.port_list_ = []
        for i in range(1000):
            if default_port + i != port and \
                    self._port_is_available(default_port + i):
                self.port_list_.append(default_port + i)
                break
        self.bserver_.prepare_server(
            workdir=workdir,
            port=self.port_list_[0],
            device=device,
            cube_conf=cube_conf)
        self.set_port(port)

    def _launch_brpc_service(self, bserver):
        bserver.run_server()

    def _port_is_available(self, port):
        with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
            sock.settimeout(2)
            result = sock.connect_ex(('0.0.0.0', port))
        return result != 0

    def run_server(self):
        p_bserver = Process(
            target=self._launch_brpc_service, args=(self.bserver_, ))
        p_bserver.start()
        options = [('grpc.max_send_message_length', self.body_size_),
                   ('grpc.max_receive_message_length', self.body_size_)]
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=self.worker_num_),
            options=options,
            maximum_concurrent_rpcs=self.concurrency_)
        multi_lang_general_model_service_pb2_grpc.add_MultiLangGeneralModelServiceServicer_to_server(
            MultiLangServerServiceServicer(
                self.bclient_config_path_, self.is_multi_model_,
                ["0.0.0.0:{}".format(self.port_list_[0])]), server)
        server.add_insecure_port('[::]:{}'.format(self.gport_))
        server.start()
        p_bserver.join()
        server.wait_for_termination()
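
# MultiLangServer sketch (model path illustrative): the brpc Server runs in
# a child process on an internal port chosen from port_list_, while the gRPC
# front end above listens on the public port:
#
#     multi_lang_server = MultiLangServer()
#     multi_lang_server.set_op_sequence(op_seq_maker.get_op_sequence())
#     multi_lang_server.load_model_config("uci_housing_model")
#     multi_lang_server.prepare_server(workdir="workdir", port=9393)
#     multi_lang_server.run_server()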