#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from time import time as _time
import time
import threading
import multiprocessing
from paddle_serving_client import Client
from concurrent import futures
import logging
import func_timeout
import os
import sys
import collections
import numpy as np
import json
from numpy import *
from io import BytesIO
if sys.version_info.major == 2:
    import Queue
elif sys.version_info.major == 3:
    import queue as Queue
else:
    raise Exception("Error Python version")

from .error_catch import ErrorCatch, CustomException, CustomExceptionCode, ParamChecker, ParamVerify
check_feed_dict = ParamVerify.check_feed_dict
check_fetch_list = ParamVerify.check_fetch_list
from .proto import pipeline_service_pb2
from .channel import (ThreadChannel, ProcessChannel, ChannelData,
                      ChannelDataType, ChannelStopError, ChannelTimeoutError)
from .error_catch import ProductErrCode
from .error_catch import CustomExceptionCode as ChannelDataErrcode
from .util import NameGenerator
from .profiler import UnsafeTimeProfiler as TimeProfiler
from . import local_service_handler
from .pipeline_client import PipelineClient as PPClient
from paddle_serving_server.util import kill_stop_process_by_pid

_LOGGER = logging.getLogger(__name__)
_op_name_gen = NameGenerator("Op")

# data type of tensor to numpy_data
_TENSOR_DTYPE_2_NUMPY_DATA_DTYPE = {
    0: "int64",  # VarType.INT64
    1: "float32",  # VarType.FP32
    2: "int32",  # VarType.INT32
    3: "float64",  # VarType.FP64
    4: "int16",  # VarType.int16
    5: "float16",  # VarType.FP32
    6: "uint16",  # VarType.BF16
    7: "uint8",  # VarType.UINT8
    8: "int8",  # VarType.INT8
    9: "bool",  # VarType.BOOL
    10: "complex64",  # VarType.COMPLEX64
    11: "complex128",  # VarType.COMPLEX128
    12: "string",  # load by numpy
    13: "bytes",  # load by numpy
}
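
# Illustrative lookup (a sketch, not part of the original module): decoding a
# tensor whose dtype enum is 1 (VarType.FP32); `tensor_bytes` is a hypothetical
# raw buffer received from a request:
#     np_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE[1]  # "float32"
#     arr = np.frombuffer(tensor_bytes, dtype=np_dtype)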


class Op(object):
    def __init__(self,
                 name=None,
                 input_ops=[],
                 server_endpoints=None,
                 fetch_list=None,
                 client_config=None,
                 client_type=None,
                 concurrency=None,
                 timeout=None,
                 retry=0,
                 batch_size=None,
                 auto_batching_timeout=None,
                 local_service_handler=None,
                 jump_to_ops=[]):
        # In __init__, all the parameters are just saved and Op is not initialized
        if name is None:
            name = _op_name_gen.next()
        self.name = name  # to identify the type of OP, it must be globally unique
        self.concurrency = concurrency  # amount of concurrency
        self.set_input_ops(input_ops)
        self.set_jump_to_ops(jump_to_ops)

        self._local_service_handler = local_service_handler
        self._server_endpoints = server_endpoints
        self._fetch_names = fetch_list
        self._client_config = client_config
        self.client_type = client_type
        self._timeout = timeout
        self._retry = max(1, retry)
        self._batch_size = batch_size
        self._auto_batching_timeout = auto_batching_timeout
        self._use_encryption_model = None
        self._encryption_key = ""

        self._input = None
        self._outputs = []

        self._server_use_profile = False
        self._tracer = None

        # for grpc_pipeline predict mode. False, string key/val; True, tensor format.
        self._pack_tensor_format = False

        # only for thread op
        self._for_init_op_lock = threading.Lock()
        self._for_close_op_lock = threading.Lock()
        self._succ_init_op = False
        self._succ_close_op = False
        self.dynamic_shape_info = {} 
        self.set_dynamic_shape_info()
    
    def set_dynamic_shape_info(self):
        """
        when opening tensorrt(configure in config.yml) and each time the input shape
        for inferring is different, using this method for configuring tensorrt
        dynamic shape to infer in each op model
        """
        pass
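
    # Illustrative override (a sketch, not part of this module): the key names
    # follow the Paddle Inference min/max/opt dynamic-shape convention; the
    # tensor name "x" and the shape ranges are hypothetical and must match the
    # deployed model:
    #
    #     def set_dynamic_shape_info(self):
    #         self.dynamic_shape_info = {
    #             "min_input_shape": {"x": [1, 3, 224, 224]},
    #             "max_input_shape": {"x": [8, 3, 1280, 1280]},
    #             "opt_input_shape": {"x": [4, 3, 640, 640]},
    #         }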

    # for feed/fetch dict check
    @staticmethod
    def get_feed_fetch_list(client):
        from paddle_serving_app.local_predict import LocalPredictor
        if isinstance(client, Client):
            feed_names = client.get_feed_names()
            fetch_names = client.get_fetch_names()
        if isinstance(client, LocalPredictor):
            feed_names = client.feed_names_
            fetch_names = client.fetch_names_
        return feed_names, fetch_names
              

    def init_from_dict(self, conf):
        """
        Initializing one Op from config.yaml. If server_endpoints exist,
        which is remote RPC mode, otherwise it is local RPC mode. There
        are three types of predictios in local RPC mode, brpc, grpc and
        local_predictor.

        Args:
            conf: config.yaml

        Returns:
        """
        if self.concurrency is None:
            self.concurrency = conf["concurrency"]
        if self._retry is None:
            self._retry = conf["retry"]
        if self._fetch_names is None:
            self._fetch_names = conf.get("fetch_list")
        if self._client_config is None:
            self._client_config = conf.get("client_config")
        if self._use_encryption_model is None:
            print("config use_encryption model here",
                  conf.get("use_encryption_model"))
            self._use_encryption_model = conf.get("use_encryption_model")
            if self._encryption_key is None or self._encryption_key == "":
                self._encryption_key = conf.get("encryption_key")
        if self._timeout is None:
            self._timeout = conf["timeout"]
        if self._timeout > 0:
            self._timeout = self._timeout / 1000.0
        else:
            self._timeout = -1

        if self._batch_size is None:
            self._batch_size = conf["batch_size"]
        if self._auto_batching_timeout is None:
            self._auto_batching_timeout = conf["auto_batching_timeout"]
        if self._auto_batching_timeout <= 0 or self._batch_size == 1:
            _LOGGER.debug(
                self._log(
                    "Because auto_batching_timeout <= 0 or batch_size == 1,"
                    " set auto_batching_timeout to None."))
            self._auto_batching_timeout = None
        else:
            self._auto_batching_timeout = self._auto_batching_timeout / 1000.0

        self.model_config = None
        self.workdir = None
        self.thread_num = self.concurrency
        self.device_type = -1
        self.devices = ""
        self.mem_optim = False
        self.ir_optim = False
        self.precision = "fp32"
        self.use_mkldnn = False
        self.mkldnn_cache_capacity = 0
        self.mkldnn_op_list = None
        self.mkldnn_bf16_op_list = None
        self.min_subgraph_size = 3
        self.use_calib = False

        if self._server_endpoints is None:
            server_endpoints = conf.get("server_endpoints", [])
            if len(server_endpoints) != 0:
                # remote service
                self.with_serving = True
                self._server_endpoints = server_endpoints
                self.client_type = conf["client_type"]
            else:
                if self._local_service_handler is None:
                    local_service_conf = conf.get("local_service_conf")
                    _LOGGER.info("local_service_conf: {}".format(
                        local_service_conf))
                    self.model_config = local_service_conf.get("model_config")
                    self.client_type = local_service_conf.get("client_type")
                    self.workdir = local_service_conf.get("workdir")
                    self.thread_num = local_service_conf.get("thread_num")
                    self.device_type = local_service_conf.get("device_type")
                    self.devices = local_service_conf.get("devices")
                    self.mem_optim = local_service_conf.get("mem_optim")
                    self.ir_optim = local_service_conf.get("ir_optim")
                    self._fetch_names = local_service_conf.get("fetch_list")
                    self.precision = local_service_conf.get("precision")
                    self.use_calib = local_service_conf.get("use_calib")
                    self.use_mkldnn = local_service_conf.get("use_mkldnn")
                    self.mkldnn_cache_capacity = local_service_conf.get(
                        "mkldnn_cache_capacity")
                    self.mkldnn_op_list = local_service_conf.get(
                        "mkldnn_op_list")
                    self.mkldnn_bf16_op_list = local_service_conf.get(
                        "mkldnn_bf16_op_list")
                    self.min_subgraph_size = local_service_conf.get(
                        "min_subgraph_size")

                    if self.model_config is None:
                        self.with_serving = False
                    else:
                        # local rpc service
                        self.with_serving = True
                        if self.client_type == "brpc" or self.client_type == "grpc":
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.
                                mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list,
                                min_subgraph_size=self.min_subgraph_size,
                                dynamic_shape_info=self.dynamic_shape_info,
                                use_calib=self.use_calib)
                            service_handler.prepare_server()  # get fetch_list
                            service_ports = service_handler.get_port_list()
                            self._server_endpoints = [
                                "127.0.0.1:{}".format(p) for p in service_ports
                            ]
                            if self._client_config is None:
                                self._client_config = service_handler.get_client_config(
                                )
                            if self._fetch_names is None:
                                self._fetch_names = service_handler.get_fetch_list(
                                )
                        elif self.client_type == "local_predictor":
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                fetch_names=self._fetch_names,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.
                                mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list,
                                min_subgraph_size=self.min_subgraph_size,
                                dynamic_shape_info=self.dynamic_shape_info,
                                use_calib=self.use_calib)
                            if self._client_config is None:
                                self._client_config = service_handler.get_client_config(
                                )
                        self._local_service_handler = service_handler
                else:
                    self.with_serving = True
                    self._local_service_handler.prepare_server(
                    )  # get fetch_list
                    service_ports = self._local_service_handler.get_port_list()
                    self._server_endpoints = [
                        "127.0.0.1:{}".format(p) for p in serivce_ports
                    ]
                    if self._client_config is None:
                        self._client_config = self._local_service_handler.get_client_config(
                        )
                    if self._fetch_names is None:
                        self._fetch_names = self._local_service_handler.get_fetch_list(
                        )
        else:
            self.with_serving = True

        if not isinstance(self, RequestOp) and not isinstance(self, ResponseOp):
            _LOGGER.info(
                self._log("\n\tinput_ops: {},"
                          "\n\tserver_endpoints: {}"
                          "\n\tfetch_list: {}"
                          "\n\tclient_config: {}"
                          "\n\tconcurrency: {},"
                          "\n\ttimeout(s): {},"
                          "\n\tretry: {},"
                          "\n\tbatch_size: {},"
                          "\n\tauto_batching_timeout(s): {}".format(
                              ", ".join([op.name for op in self._input_ops
                                         ]), self._server_endpoints,
                              self._fetch_names, self._client_config,
                              self.concurrency, self._timeout, self._retry,
                              self._batch_size, self._auto_batching_timeout)))

    def launch_local_rpc_service(self):
        """
        Launching multiple local rpc servers.

        Args:
            None

        Returns:
            None
        """
        if self._local_service_handler is None:
            _LOGGER.warning(
                self._log("Failed to launch local rpc"
                          " service: local_service_handler is None."))
            return
        port = self._local_service_handler.get_port_list()
        #if self._local_service_handler.client_type == "local_predictor":
        #    _LOGGER.info("Op({}) use local predictor.")
        #    return
        self._local_service_handler.start_server()
        _LOGGER.info("Op({}) use local rpc service at port: {}"
353 354
                     .format(self.name, port))

    def use_default_auto_batching_config(self):
        """
        Reset the auto-batching config to its default:
        batch_size=1 and auto_batching_timeout=None.

        Args:
            None

        Returns:
            None
        """
        if self._batch_size != 1:
            _LOGGER.warning("Op({}) reset batch_size=1 (original: {})"
                            .format(self.name, self._batch_size))
            self._batch_size = 1
        if self._auto_batching_timeout is not None:
            _LOGGER.warning(
                "Op({}) reset auto_batching_timeout=None (original: {})"
                .format(self.name, self._auto_batching_timeout))
            self._auto_batching_timeout = None

    def use_profiler(self, use_profile):
        self._server_use_profile = use_profile

    def set_tracer(self, tracer):
        self._tracer = tracer

    def set_use_prometheus(self, use_prometheus):
        self._use_prometheus = use_prometheus

    def init_client(self, client_config, server_endpoints):
        """
        Initialize the client object. There are three types of clients: brpc,
        grpc and local_predictor. In grpc or brpc mode, the client connects to
        the server endpoints.

        Args:
            client_config: client config info
            server_endpoints: server IP/Port list.

        Returns:
            client: client object.
        """
        if self.with_serving is False:
            _LOGGER.info("Op({}) has no client (and it also do not "
                         "run the process function)".format(self.name))
            return None
        if self.client_type == 'brpc':
            client = Client()
            client.load_client_config(client_config)
            self.right_feed_names, self.right_fetch_names = self.get_feed_fetch_list(client) 
        elif self.client_type == 'pipeline_grpc':
            client = PPClient()
        elif self.client_type == 'local_predictor':
            if self.local_predictor is None:
                raise ValueError("local predictor not yet created")
            client = self.local_predictor
            self.right_feed_names, self.right_fetch_names = self.get_feed_fetch_list(client)
        else:
            raise ValueError("Failed to init client: unknow client "
                             "type {}".format(self.client_type))
        if self._fetch_names is None:
            self._fetch_names = client.fetch_names_
            _LOGGER.info("Op({}) has no fetch name set. So fetch all vars"
                         .format(self.name))
        if self.client_type != "local_predictor":
            if self._use_encryption_model is None or self._use_encryption_model is False:
                client.connect(server_endpoints)
            else:
                print("connect to encryption rpc client")
                client.use_key(self._encryption_key)
                client.connect(server_endpoints, encryption=True)
        _LOGGER.info("init_client, feed_list:{}, fetch_list: {}".format(self.right_feed_names, self.right_fetch_names))
        return client

    def get_input_ops(self):
        return self._input_ops

    def set_input_ops(self, ops):
        """
        Set input ops. Each op can have many input ops, but only one input
        channel.

        Args:
            ops: op list

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]
        self._input_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set input_ops: input op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._input_ops.append(op)

    def set_pack_tensor_format(self, is_tensor_format=False):
        self._pack_tensor_format = is_tensor_format

    def get_jump_to_ops(self):
        return self._jump_to_ops

    def set_jump_to_ops(self, ops):
        """
        Set jump-to ops; this op can then send channeldata directly to the
        output channels of those ops.

        Args:
            ops: list of ops to be jumped to

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]

        self._jump_to_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set input_ops: input op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._jump_to_ops.append(op)

    def is_jump_op(self):
        """
        Whether this op has jump-to ops or not.

        Args:
            None

        Returns:
            True or False
        """
        return len(self._jump_to_ops) > 0

    def check_jumping(self, input_data):
        """
        Check whether to send data to jump ops. WhileOp needs to override
        this interface; this function returns False by default.
     
        Args:
            input_data: input data to be preprocessed

        Returns:
            True, send data to the output channel of jump ops
            False, send data to output channel.
        """
        return False
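
    # Illustrative override (a sketch, not part of this module): a loop-style
    # Op might jump while a hypothetical "loop_flag" field in the preprocessed
    # data stays truthy:
    #
    #     def check_jumping(self, input_data):
    #         return bool(input_data.get("loop_flag", False))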

    def get_output_channels_of_jump_ops(self):
        """
        Get output channels of jump ops

        Args:
            None

        Returns:
            list of channels
        """
        channels = []
        if self.is_jump_op() is False:
            return channels
        for op in self._jump_to_ops:
            _LOGGER.info("op:{} extend op._get_output_channels:{}".format(
                op.name, op._get_output_channels()))
            channels.extend(op._get_output_channels())

        _LOGGER.info("get_output_channels_of_jump_ops, channels:{}".format(
            channels))
        return channels

    def add_input_channel(self, channel):
        """
        Adding one input channel to the Op. Each op can have many front ops,
        but only one input channel.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to set input_channel: input "
                          "channel must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        channel.add_consumer(self.name)
        self._input = channel

    def clean_input_channel(self):
        self._input = None

    def _get_input_channel(self):
        return self._input

    def add_output_channel(self, channel):
        """
        Adding one output channel to the Op. Each op can have many output
        channels, but only one input channel.

        Args:
            channel: an output channel object.

        Returns:
            None
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output channel "
                          "must be Channel type, not {}".format(type(channel))))
            os._exit(-1)
        channel.add_producer(self.name)
        self._outputs.append(channel)
        _LOGGER.debug("op:{} add output_channel {}".format(self.name, channel))

    def clean_output_channels(self):
        self._outputs = []

    def _get_output_channels(self):
        return self._outputs

    def preprocess(self, input_dicts, data_id=0, log_id=0):
        """
        In preprocess stage, assembling data for process stage. users can 
        override this function for model feed features.

        Args:
            input_dicts: input data to be preprocessed
            data_id: inner unique id, auto-increment
            log_id: global unique id for RTT, 0 default

        Return:
            output_data: data for process stage
            is_skip_process: skip process stage or not, False default
            prod_errcode: None default; otherwise, a product error occurred.
                          It is handled in the same way as an exception.
            prod_errinfo: "" default
        """
        # multiple previous Op
        if len(input_dicts) != 1:
            _LOGGER.critical(
                self._log(
                    "Failed to run preprocess: this Op has multiple previous "
                    "inputs. Please override this func."))
            os._exit(-1)

        (_, input_dict), = input_dicts.items()
        return input_dict, False, None, ""
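
    # Illustrative override (a sketch): decode a hypothetical "image" feed from
    # the request dict into the numpy feed dict expected by the model; the feed
    # name "image" and the shape are assumptions, not part of this module:
    #
    #     def preprocess(self, input_dicts, data_id=0, log_id=0):
    #         (_, input_dict), = input_dicts.items()
    #         img = np.frombuffer(input_dict["image"], dtype="float32")
    #         return {"image": img.reshape(1, 3, 224, 224)}, False, None, ""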
    
    def process(self, feed_batch, typical_logid=0):
        """
        In process stage, send requests to the inference server or predict locally.
        Users do not need to override this function.
        Args:
            feed_batch: data to be fed to inference server
            typical_logid: mark batch predicts, usually the first logid in batch,
                0 default.

        Returns:
            call_result: predict result
        """

        call_result = None
        err_code = ChannelDataErrcode.OK.value
        err_info = ""
        @ErrorCatch
        @ParamChecker
        def feed_fetch_list_check_helper(
                feed_batch: lambda feed_batch: check_feed_dict(
                    feed_batch[0], self.right_feed_names),
                fetch_list: lambda fetch_list: check_fetch_list(
                    fetch_list, self.right_fetch_names),
                log_id):
            return None
        _, resp = feed_fetch_list_check_helper(feed_batch, self._fetch_names, log_id=typical_logid)
        if resp.err_no != CustomExceptionCode.OK.value:
            err_code = resp.err_no
            err_info = resp.err_msg
            call_result = None
            return call_result, err_code, err_info
                
        if self.client_type == "local_predictor":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for local_predictor mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"

            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "brpc":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for brpc mode.".format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"
            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "pipeline_grpc":
            err, err_info = ChannelData.check_dictdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                       dict data in process for pipeline_grpc mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be dict"

            call_result = self.client.predict(
                feed_dict=feed_batch[0],
                fetch=self._fetch_names,
                asyn=False,
                pack_tensor_format=self._pack_tensor_format,
                profile=False)
            if call_result is None:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. call_result is None."))
                return call_result, ChannelDataErrcode.UNKNOW.value, "pipeline_grpc error"
            if call_result.err_no != 0:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. err_no:{}, err_info:{}".
                              format(call_result.err_no, call_result.err_msg)))
                return call_result, ChannelDataErrcode(
                    call_result.err_no).value, call_result.err_msg

            new_dict = {}
            err_code = ChannelDataErrcode(call_result.err_no).value
            err_info = call_result.err_msg
            for idx, key in enumerate(call_result.key):
                new_dict[key] = [call_result.value[idx]]
            call_result = new_dict

        return call_result, err_code, err_info

    def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
        """
        In postprocess stage, assemble data for next op or output.
        Args:
            input_data: data returned in preprocess stage, dict(for single predict) or list(for batch predict)
            fetch_data: data returned in process stage, dict(for single predict) or list(for batch predict)
            data_id: inner unique id, auto-increment
            log_id: logid, 0 default

        Returns: 
            fetch_dict: fetch result must be dict type.
            prod_errcode: None default; otherwise, a product error occurred.
                          It is handled in the same way as an exception.
            prod_errinfo: "" default
        """
        fetch_dict = {}
        if isinstance(fetch_data, dict):
            fetch_dict = fetch_data
        return fetch_dict, None, ""
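
    # Illustrative override (a sketch): map an assumed "score" fetch var to a
    # label; per the contract above, the returned value must stay a dict:
    #
    #     def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
    #         score = fetch_data["score"]  # "score" is a hypothetical fetch name
    #         return {"label": str(np.argmax(score))}, None, ""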

    def _parse_channeldata(self, channeldata_dict):
        """
        Parse one channeldata 
        Args:
            channeldata_dict : channel data to be parsed, dict type
        
        Return:
            data_id: created by dag._id_generator, unique
            error_channeldata: error channeldata
            parsed_data: get np/dict data from channeldata
            client_need_profile: need profile info
            profile_set: profile info
            log_id: logid for tracing a request 
        """
        data_id, error_channeldata = None, None
        client_need_profile, profile_set = False, set()
        parsed_data = {}

        key = list(channeldata_dict.keys())[0]
        data_id = channeldata_dict[key].id
        log_id = channeldata_dict[key].log_id
        client_need_profile = channeldata_dict[key].client_need_profile

        for name, data in channeldata_dict.items():
            if data.error_code != ChannelDataErrcode.OK.value:
                error_channeldata = data
                break
            parsed_data[name] = data.parse()
            if client_need_profile:
                profile_set |= data.profile_data_set
        return (data_id, error_channeldata, parsed_data, client_need_profile,
                profile_set, log_id)

    def _push_to_output_channels(self,
                                 data,
                                 channels,
                                 name=None,
                                 profile_str=None,
                                 client_need_profile=False,
                                 profile_set=None):
        """
        Push data to output channels without running the later stages
        (preprocess, process, postprocess).
        Args:
            data: channeldata, to be pushed
            channels: output channels
            name: op name  
            profile_str: one profile message
            client_need_profile: False default
            profile_set: profile message collections

        Returns:
            None
        """
        if name is None:
            name = self.name

        # add profile into channeldata
        if client_need_profile and profile_set is not None:
            if profile_str is not None:
                profile_set.add(profile_str)
            data.add_profile(profile_set)

        for channel in channels:
            channel.push(data, name)

    def start_with_process(self):
        """
        Each OP creates a process to run the main loop, initializes the CUDA
        environment in each individual process.

        Args:
            None

        Returns:
            process array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()
        process = []
        for concurrency_idx in range(self.concurrency):
            p = multiprocessing.Process(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), False, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn,
                      self.mkldnn_cache_capacity, self.mkldnn_op_list,
                      self.mkldnn_bf16_op_list, self.is_jump_op(),
                      self.get_output_channels_of_jump_ops(),
                      self.min_subgraph_size, self.dynamic_shape_info, 
                      self.use_calib))
            p.daemon = True
            p.start()
            process.append(p)
        return process

    def start_with_thread(self):
        """
        Each OP creates a thread to run the main loop, initializes the CUDA 
        environment in the main thread.

        Args:
            None
 
        Returns:
            thread array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()

        #Init cuda env in main thread
        if self.client_type == "local_predictor":
            _LOGGER.info("Init cuda env in main thread")
            self.local_predictor = self._local_service_handler.get_client(0)

        threads = []
        for concurrency_idx in range(self.concurrency):
            t = threading.Thread(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), True, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn, 
                      self.mkldnn_cache_capacity, self.mkldnn_op_list, 
                      self.mkldnn_bf16_op_list, self.is_jump_op(), 
                      self.get_output_channels_of_jump_ops(),
                      self.min_subgraph_size, self.dynamic_shape_info,
                      self.use_calib))
            # When a process exits, it attempts to terminate
            # all of its daemonic child processes.
            t.daemon = True
            t.start()
            threads.append(t)
        return threads

    def init_op(self):
        pass

    def _run_preprocess(self, parsed_data_dict, op_info_prefix, logid_dict):
        """
        Run preprocess stage
        Args:
            parsed_data_dict: data to be pre-processed
            op_info_prefix: input op info
            logid_dict: logid dict

        Returns:
            preped_data_dict: data preprocessed, to be processed 
            err_channeldata_dict: when exceptions occurred, putting errors in it.
            skip_process_dict: skip process stage or not

        """
        _LOGGER.debug("{} Running preprocess".format(op_info_prefix))
        preped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        skip_process_dict = {}
        @ErrorCatch
        def preprocess_help(self, parsed_data, data_id, logid_dict):
            preped_data, is_skip_process, prod_errcode, prod_errinfo = self.preprocess(
                parsed_data, data_id, logid_dict.get(data_id))
            return preped_data, is_skip_process, prod_errcode, prod_errinfo
            
        for data_id, parsed_data in parsed_data_dict.items():
            preped_data, error_channeldata = None, None
            is_skip_process = False
            prod_errcode, prod_errinfo = None, None
            log_id = logid_dict.get(data_id)
            process_res, resp = preprocess_help(
                self, parsed_data, data_id=data_id, logid_dict=logid_dict)
            if resp.err_no == CustomExceptionCode.OK.value:
                preped_data, is_skip_process, prod_errcode, prod_errinfo = process_res
                if is_skip_process is True:
                    skip_process_dict[data_id] = True
                if prod_errcode is not None:
                    _LOGGER.error(
                        "data_id: {} return product error. Product ErrNo: {}, "
                        "Product ErrMsg: {}".format(data_id, prod_errcode,
                                                    prod_errinfo))
                    error_channeldata = ChannelData(
                      error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                      error_info="",
                      prod_error_code=prod_errcode,
                      prod_error_info=prod_errinfo,
                      data_id=data_id,
                      log_id=log_id)
            else:
                error_channeldata = ChannelData(
                  error_code=resp.err_no,
                  error_info=resp.err_msg,
                  data_id=data_id,
                  log_id=log_id)
                skip_process_dict[data_id] = True 

            if error_channeldata is not None:
                err_channeldata_dict[data_id] = error_channeldata
            else:
                preped_data_dict[data_id] = preped_data
        _LOGGER.debug("{} Succ preprocess".format(op_info_prefix))
        return preped_data_dict, err_channeldata_dict, skip_process_dict

    def _run_process(self, preped_data_dict, op_info_prefix, skip_process_dict,
                     logid_dict):
        """
        Run process stage
        Args:
            preped_data_dict: feed the data to be predicted by the model.  
            op_info_prefix: prefix op info
            skip_process_dict: skip process stage or not
            logid_dict: logid dict

        Returns:
            midped_data_dict: data midprocessed, to be post-processed 
            err_channeldata_dict: when exceptions occurred, putting errors in it 
        """
        _LOGGER.debug("{} Running process".format(op_info_prefix))
        midped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        is_skip_process = False
        data_ids = list(preped_data_dict.keys())

        # skip process stage
        if len(data_ids) == 1 and skip_process_dict.get(data_ids[0]) is True:
            is_skip_process = True
        if self.with_serving is False or is_skip_process is True:
            midped_data_dict = preped_data_dict
            _LOGGER.warning("(data_id={} log_id={}) OP={} skip process stage. " \
                "with_serving={}, is_skip_process={}".format(data_ids[0],
                logid_dict.get(data_ids[0]), self.name, self.with_serving,
                is_skip_process))
            return midped_data_dict, err_channeldata_dict

        # use typical_logid to mark batch data
        # data_ids is one self-increasing unique key. 
        typical_logid = data_ids[0]
        if len(data_ids) != 1:
            for data_id in data_ids:
                _LOGGER.info(
                    "(data_id={} logid={}) Auto-batching is On Op={}!!" \
                    "We selected logid={} (from batch: {}) as a " \
                    "representative for logging.".format(
                    data_id, logid_dict.get(data_id), self.name,
                    typical_logid, data_ids))

        one_input = preped_data_dict[data_ids[0]]
        feed_batch = []
        feed_dict = {}
        cur_offset = 0
        input_offset_dict = {}
        batch_input = False

        if isinstance(one_input, dict):
            # For dict type, data structure is dict.
            # Merge multiple dicts for data_ids into one dict.
            # feed_batch is the input param of predict func.
            # input_offset_dict is used for data restoration by data_id
            if len(data_ids) == 1:
                feed_batch = [preped_data_dict[data_id] for data_id in data_ids]
            else:
                for data_id in data_ids:
                    for key, val in preped_data_dict[data_id].items():
                        has_val = feed_dict.get(key)
                        if has_val is None:
                            feed_dict[key] = val
                            continue
                        # merge two np.ndarray values
                        if isinstance(val, np.ndarray):
                            feed_dict[key] = np.append(
                                feed_dict[key], val, axis=0)
                feed_batch.append(feed_dict)

            for data_id in data_ids:
                start = cur_offset
                for key, val in preped_data_dict[data_id].items():
                    if isinstance(val, (list, np.ndarray)):
                        cur_offset += len(val)
                    else:
                        cur_offset += 1
                    break
                input_offset_dict[data_id] = [start, cur_offset]
        elif isinstance(one_input, list):
            # For list type, data structure of one_input is [dict, dict, ...]
            # Data structure of feed_batch is [dict1_1, dict1_2, dict2_1, ...]   
            # Data structure of input_offset_dict is { data_id : [start, end] }
            batch_input = True
            for data_id in data_ids:
                feed_batch.extend(preped_data_dict[data_id])
                data_size = len(preped_data_dict[data_id])
                start = cur_offset
                cur_offset = start + data_size
                input_offset_dict[data_id] = [start, cur_offset]
        else:
            _LOGGER.critical(
                "(data_id={} log_id={}){} Failed to process: expect input type is dict"
                " or list(batch input), but get {}".format(data_ids[
                    0], typical_logid, op_info_prefix, type(one_input)))
            for data_id in data_ids:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = "expect input type is dict or list, but get {}".format(
                    type(one_input))
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=error_code,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        midped_batch = None
        error_code = ChannelDataErrcode.OK.value
        error_info = ""
        if self._timeout <= 0:
            # No retry
            try:
                if batch_input is False:
                    midped_batch, error_code, error_info = self.process(
                        feed_batch, typical_logid)
                else:
                    midped_batch = []
                    for idx in range(len(feed_batch)):
                        predict_res, error_code, error_info = self.process(
                            [feed_batch[idx]], typical_logid)
                        if error_code != ChannelDataErrcode.OK.value:
                            break
                        midped_batch.append(predict_res)
            except Exception as e:
                error_code = ChannelDataErrcode.UNKNOW.value
                error_info = "(data_id={} log_id={}) {} Failed to process(batch: {}): {}".format(
                    data_ids[0], typical_logid, op_info_prefix, data_ids, e)
                _LOGGER.error(error_info, exc_info=True)
        else:
            # retry N times configed in yaml files.
            for i in range(self._retry):
                try:
                    # time out for each process
                    if batch_input is False:
                        midped_batch, error_code, error_info = func_timeout.func_timeout(
                            self._timeout,
                            self.process,
                            args=(feed_batch, typical_logid))
                    else:
                        midped_batch = []
                        for idx in range(len(feed_batch)):
                            predict_res, error_code, error_info = func_timeout.func_timeout(
                                self._timeout,
                                self.process,
                                args=([feed_batch[idx]], typical_logid))
                            midped_batch.append(predict_res)

                except func_timeout.FunctionTimedOut as e:
                    if i + 1 >= self._retry:
                        error_code = ChannelDataErrcode.TIMEOUT.value
                        error_info = "(log_id={}) {} Failed to process(batch: {}): " \
                            "exceeded retry count.".format(typical_logid, op_info_prefix, data_ids)
                        _LOGGER.error(error_info)
                    else:
                        _LOGGER.warning(
                            "(log_id={}) {} Failed to process(batch: {}): timeout,"
                            " and retrying({}/{})...".format(
                                typical_logid, op_info_prefix, data_ids, i + 1,
                                self._retry))
                except Exception as e:
                    error_code = ChannelDataErrcode.UNKNOW.value
                    error_info = "(log_id={}) {} Failed to process(batch: {}): {}".format(
                        typical_logid, op_info_prefix, data_ids, e)
                    _LOGGER.error(error_info, exc_info=True)
                    break
                else:
                    break

        # 2 kinds of errors
        if error_code != ChannelDataErrcode.OK.value or midped_batch is None:
            error_info = "[{}] failed to predict. {}. Please check the input dict and checkout PipelineServingLogs/pipeline.log for more details.".format(
             self.name, error_info)
    
            _LOGGER.error(error_info)
            for data_id in data_ids:
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=error_code,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        # Split batch infer result to each data_ids
        if batch_input is False:
            var_names = midped_batch.keys()
            lod_var_names = set()
            lod_offset_names = set()
            # midped_batch is dict type for single input 
            for name in var_names:
                lod_offset_name = "{}.lod".format(name)
                if lod_offset_name in var_names:
                    _LOGGER.debug("(log_id={}) {} {} is LodTensor".format(
                        typical_logid, op_info_prefix, name))
                    lod_var_names.add(name)
                    lod_offset_names.add(lod_offset_name)

            for idx, data_id in enumerate(data_ids):
                midped_data_dict[data_id] = {}

            for name, value in midped_batch.items():
                if name in lod_offset_names:
                    continue
                if name in lod_var_names:
                    # lodtensor
                    lod_offset_name = "{}.lod".format(name)
                    lod_offset = midped_batch[lod_offset_name]
                    for idx, data_id in enumerate(data_ids):
                        data_offset_left = input_offset_dict[data_id][0]
                        data_offset_right = input_offset_dict[data_id][1]
                        lod_offset_left = lod_offset[data_offset_left]
                        lod_offset_right = lod_offset[data_offset_right]
                        midped_data_dict[data_id][name] = value[
                            lod_offset_left:lod_offset_right]
                        midped_data_dict[data_id][lod_offset_name] = \
                            lod_offset[data_offset_left:data_offset_right + 1] - lod_offset[data_offset_left]
                else:
                    # normal tensor
                    for idx, data_id in enumerate(data_ids):
                        start = input_offset_dict[data_id][0]
                        end = input_offset_dict[data_id][1]
                        midped_data_dict[data_id][name] = value[start:end]
        else:
            # midped_batch is list type for batch input
            for idx, data_id in enumerate(data_ids):
                start = input_offset_dict[data_id][0]
                end = input_offset_dict[data_id][1]
                midped_data_dict[data_id] = midped_batch[start:end]
        return midped_data_dict, err_channeldata_dict

    def _run_postprocess(self, parsed_data_dict, midped_data_dict,
                         op_info_prefix, logid_dict):
        """
        Run postprocess stage.
        Args:
            parsed_data_dict: data returned in preprocess stage 
            midped_data_dict: data returned in process stage
            op_info_prefix: prefix op info
            logid_dict: logid dict

        Returns:
            postped_data_dict: data postprocessed 
            err_channeldata_dict: when exceptions occurred, putting errors in it
 
        """
        _LOGGER.debug("{} Running postprocess".format(op_info_prefix))
        postped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        @ErrorCatch
        def postprocess_help(self, parsed_data_dict, midped_data, data_id, logid_dict):
            postped_data, prod_errcode, prod_errinfo = self.postprocess(
                parsed_data_dict[data_id], midped_data, data_id,
                logid_dict.get(data_id))
            if not isinstance(postped_data, dict):
                raise CustomException(CustomExceptionCode.TYPE_ERROR,
                                      "postprocess should return dict", True)
            return postped_data, prod_errcode, prod_errinfo

        for data_id, midped_data in midped_data_dict.items():
            log_id = logid_dict.get(data_id)
            postped_data, err_channeldata = None, None
            prod_errcode, prod_errinfo = None, None

            post_res, resp = postprocess_help(
                self, parsed_data_dict, midped_data,
                data_id=data_id, logid_dict=logid_dict)
            if resp.err_no == CustomExceptionCode.OK.value:
                postped_data, prod_errcode, prod_errinfo = post_res
                if prod_errcode is not None:
                    # product errors occurred
                    err_channeldata = ChannelData(
                        error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                        error_info="",
                        prod_error_code=prod_errcode,
                        prod_error_info=prod_errinfo,
                        data_id=data_id,
                        log_id=log_id)
            else:
                err_channeldata = ChannelData(
                    error_code=resp.err_no,
                    error_info=resp.err_msg,
                    data_id=data_id,
                    log_id=log_id)

            if err_channeldata is not None:
                err_channeldata_dict[data_id] = err_channeldata
                continue

            output_data = None
            err, _ = ChannelData.check_npdata(postped_data)
            if err == 0:
                output_data = ChannelData(
                  ChannelDataType.CHANNEL_NPDATA.value,
                  npdata=postped_data,
                  data_id=data_id,
                  log_id=log_id)
            else:
                output_data = ChannelData(
                  ChannelDataType.DICT.value,
                  dictdata=postped_data,
                  data_id=data_id,
                  log_id=log_id)
            postped_data_dict[data_id] = output_data
        _LOGGER.debug("{} Succ postprocess".format(op_info_prefix))
        return postped_data_dict, err_channeldata_dict

    def _auto_batching_generator(self, input_channel, op_name, batch_size,
                                 timeout, op_info_prefix):
        """
        Merge batch_size requests for one prediction.Taking one piece of data 
        from the input channel each time until equals batch_size, or the waiting 
        time exceeds auto_batching_timeout.

        Args:
            input_channel: the input channel of Op
            op_name: op name
            batch_size: batch size, Less than worker_num
            timeout: batch timeout, seconds, If timeout is None, and the quantity 
                taken from the front is less than batch_size, blocking occured.
            op_info_prefix: op link info.

        Returns:
            None
        """
        while True:
            batch = []
            while len(batch) == 0:
                endtime = None
                if timeout is not None:
                    endtime = _time() + timeout
                for idx in range(batch_size):
                    try:
                        channeldata_dict = None
                        front_start_time = int(round(_time() * 1000000))
                        if timeout is not None:
                            remaining = endtime - _time()
                            if remaining <= 0.0:
                                _LOGGER.debug("{} Failed to generate batch: "
                                              "timeout".format(op_info_prefix))
                                break
                            channeldata_dict = input_channel.front(op_name,
                                                                   timeout)
                        else:
                            channeldata_dict = input_channel.front(op_name)
                        batch.append(channeldata_dict)
                        _LOGGER.debug(
                            "_auto_batching_generator get {} channeldata from op:{} input channel. time={}".
                            format(idx, op_name, front_start_time))
                    except ChannelTimeoutError:
                        _LOGGER.debug("{} Failed to generate batch: "
                                      "timeout".format(op_info_prefix))
                        break
            _LOGGER.debug("{} Got actual batch_size: {}".format(op_info_prefix,
                                                                len(batch)))
            yield batch
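
        # A self-contained sketch of the same timeout-bounded batching idea
        # over a plain queue (hypothetical helper, not used by this class;
        # input_channel.front plays the role of q.get here):
        #
        #     def batched(q, batch_size, timeout):
        #         while True:
        #             batch, deadline = [], _time() + timeout
        #             for _ in range(batch_size):
        #                 remaining = deadline - _time()
        #                 if remaining <= 0.0:
        #                     break
        #                 try:
        #                     batch.append(q.get(timeout=remaining))
        #                 except Queue.Empty:
        #                     break
        #             if batch:
        #                 yield batch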
    def _parse_channeldata_batch(self, batch, output_channels):
        """
        Parse channeldatas batch
        Args:
            batch: auto-batching batch datas
            output_channels: output channels 

        Returns:
            parsed_data_dict: parsed from channeldata in batch
            need_profile_dict: need profile dict in batch 
            profile_dict: profile info dict in batch
            logid_dict: trace each request in batch
        """
        parsed_data_dict = collections.OrderedDict()
        need_profile_dict = {}
        profile_dict = {}
        logid_dict = {}
        for channeldata_dict in batch:
            (data_id, error_channeldata, parsed_data,
                    client_need_profile, profile_set, log_id) = \
                            self._parse_channeldata(channeldata_dict)
            if error_channeldata is None:
                parsed_data_dict[data_id] = parsed_data
                need_profile_dict[data_id] = client_need_profile
                profile_dict[data_id] = profile_set
                logid_dict[data_id] = log_id
            else:
                # error data in predecessor Op
                # (error_channeldata with profile info)
                self._push_to_output_channels(error_channeldata,
                                              output_channels)

        return parsed_data_dict, need_profile_dict, profile_dict, logid_dict

    def _run(self, concurrency_idx, input_channel, output_channels,
             is_thread_op, trace_buffer, model_config, workdir, thread_num,
             device_type, devices, mem_optim, ir_optim, precision,
             use_mkldnn, mkldnn_cache_capacity, mkldnn_op_list,
             mkldnn_bf16_op_list, is_jump_op, output_channels_of_jump_ops,
             min_subgraph_size, dynamic_shape_info, use_calib):
        """
        _run() is the entry function of OP process / thread model.When client 
        type is local_predictor in process mode, the CUDA environment needs to 
        be initialized by LocalServiceHandler[child process], otherwise, Cuda
        error(3), initialization error is occured. Preprocess, process and 
        postprocess are executed in the main loop. The preprocess and postprocess
        function is usually rewrited by users. Trace data is recorded by trace_que.

        Args:
            concurrency_idx: thread/process index
            input_channel: input channel, take the data to be processed
            output_channels: output channel, store processed data
            is_thread_op: False, It's process op; True, It's thread op
            trace_buffer: store trace infomations
            model_config: model config path
            workdir: work directory
            thread_num: number of threads, concurrent quantity
1327
            device_type: support multiple devices
1328 1329
            devices: gpu id list[gpu], "" default[cpu]
            mem_optim: use memory/graphics memory optimization, True default.
1330
            ir_optim: use calculation chart optimization, False default.
T
TeslaZhao 已提交
1331 1332 1333 1334 1335
            precision: inference precision, e.g. "fp32", "fp16", "int8", "bf16"
            use_mkldnn: use mkldnn, default False.
            mkldnn_cache_capacity: cache capacity of mkldnn, 0 means no limit.
            mkldnn_op_list: OP list optimized by mkldnn, None default.
            mkldnn_bf16_op_list: OP list optimized by mkldnn bf16, None default.
1336 1337
            is_jump_op: OP has jump op list or not, False default.
            output_channels_of_jump_ops: all output channels of jump ops.
1338
            use_calib: use calib mode of paddle inference, False default.
1339 1340 1341 1342

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)

        # init ops
        profiler = None
        @ErrorCatch
        def check_helper(self, is_thread_op, model_config, workdir,
             thread_num, device_type, devices, mem_optim, ir_optim,
             precision, use_mkldnn, mkldnn_cache_capacity, mkldnn_op_list,
             mkldnn_bf16_op_list, min_subgraph_size, dynamic_shape_info):

            if is_thread_op == False and self.client_type == "local_predictor":
                self.service_handler = local_service_handler.LocalServiceHandler(
                    model_config=model_config,
                    client_type="local_predictor",
                    workdir=workdir,
                    thread_num=thread_num,
                    device_type=device_type,
                    devices=devices,
                    mem_optim=mem_optim,
                    ir_optim=ir_optim,
                    precision=precision,
                    use_mkldnn=use_mkldnn,
                    mkldnn_cache_capacity=mkldnn_cache_capacity,
                    mkldnn_op_list=mkldnn_op_list,
                    mkldnn_bf16_op_list=mkldnn_bf16_op_list,
                    min_subgraph_size=min_subgraph_size,
                    dynamic_shape_info=dynamic_shape_info,
                    use_calib=use_calib)

                _LOGGER.info("Init cuda env in process {}".format(
                    concurrency_idx))
                self.local_predictor = self.service_handler.get_client(
                    concurrency_idx)
            # check all ops initialized successfully.
            profiler = self._initialize(is_thread_op, concurrency_idx)
            return profiler

        profiler, resp = check_helper(self, is_thread_op, model_config, workdir,
             thread_num, device_type, devices, mem_optim, ir_optim,
             precision, use_mkldnn, mkldnn_cache_capacity, mkldnn_op_list,
             mkldnn_bf16_op_list, min_subgraph_size, dynamic_shape_info)

        if resp.err_no != CustomExceptionCode.OK.value:
            _LOGGER.critical(
                "{} failed to init op: {}".format(op_info_prefix, resp.err_msg),
                exc_info=False)
            print("{} failed to init op: {}".format(op_info_prefix, resp.err_msg))
            kill_stop_process_by_pid("kill", os.getpgid(os.getpid()))

        _LOGGER.info("{} Succ init".format(op_info_prefix))

        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=self._batch_size,
            timeout=self._auto_batching_timeout,
            op_info_prefix=op_info_prefix)

        start, end = None, None
        trace_que = collections.deque()
        while True:
            start = int(round(_time() * 1000000))
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            in_time = end - start
            _LOGGER.debug("op:{} in_time_end:{}".format(op_info_prefix,
                                                        time.time()))

            # parse channeldata batch
            try:
                parsed_data_dict, need_profile_dict, profile_dict, logid_dict\
                        = self._parse_channeldata_batch(
                                channeldata_dict_batch, output_channels)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(parsed_data_dict) == 0:
                # data in the whole batch is all error data
                continue
            _LOGGER.debug("op:{} parse_end:{}".format(op_info_prefix,
                                                      time.time()))

            front_cost = int(round(_time() * 1000000)) - start
            for data_id, parsed_data in parsed_data_dict.items():
                _LOGGER.debug(
                    "(data_id={}) POP INPUT CHANNEL! op:{}, cost:{} ms".format(
                        data_id, self.name, front_cost / 1000.0))

            # preprocess
            start = profiler.record("prep#{}_0".format(op_info_prefix))
            preped_data_dict, err_channeldata_dict, skip_process_dict \
                    = self._run_preprocess(parsed_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("prep#{}_1".format(op_info_prefix))
            prep_time = end - start
            _LOGGER.debug("op:{} preprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), prep_time))
            try:
                # put error requests into output channel, skip process and postprocess stage
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(preped_data_dict) == 0:
                continue

            # process
            start = profiler.record("midp#{}_0".format(op_info_prefix))
            midped_data_dict, err_channeldata_dict \
                    = self._run_process(preped_data_dict, op_info_prefix, skip_process_dict, logid_dict)
            end = profiler.record("midp#{}_1".format(op_info_prefix))
            _LOGGER.info("prometheus inf count +1")
            midp_time = end - start
            _LOGGER.debug("op:{} process_end:{}, cost:{}".format(
                op_info_prefix, time.time(), midp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(midped_data_dict) == 0:
                continue

            # postprocess
            start = profiler.record("postp#{}_0".format(op_info_prefix))
            postped_data_dict, err_channeldata_dict \
                    = self._run_postprocess(parsed_data_dict, midped_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("postp#{}_1".format(op_info_prefix))
            postp_time = end - start
            after_postp_time = _time()
            _LOGGER.debug("op:{} postprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), postp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(postped_data_dict) == 0:
                continue

            # push data to channel (if run succ)
            start = int(round(_time() * 1000000))
            try:
                profile_str = profiler.gen_profile_str()
                if self.is_jump_op() is True and self.check_jumping(
                        postped_data_dict) is True:
                    # push data to output channel of ops to be jumped 
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels_of_jump_ops,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL OF JUMP OPs! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
                else:
                    # push data to output channel.
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            out_time = end - start
            after_outchannel_time = int(round(_time() * 1000000))
            if trace_buffer is not None:
                trace_que.append({
                    "name": self.name,
                    "actions": {
                        "in": in_time,
                        "prep": prep_time,
                        "midp": midp_time,
                        "postp": postp_time,
                        "out": out_time,
                    }
                })
                while trace_que:
                    info = trace_que[0]
                    try:
                        trace_buffer.put_nowait(info)
                        trace_que.popleft()
                    except Queue.Full:
                        break

    def _initialize(self, is_thread_op, concurrency_idx):
        """
        Initialize one OP object in the target function of a thread or porcess.
        Initialize the client object with _client_config and _server_endpoints.
        Create a TimeProfiler per thread or process for recording profiler info.

        Args:
            is_thread_op: True, one op runs in one thread; False, one op runs
                in one process.
            concurrency_idx: process id, Thread mode does not use this param.

        Returns:
            TimeProfiler
        """
        @ErrorCatch
        def init_helper(self, is_thread_op, concurrency_idx):
            if is_thread_op:
                with self._for_init_op_lock:
                    if not self._succ_init_op:
                        # for the threaded version of Op, each thread cannot get its concurrency_idx
                        self.concurrency_idx = None
                        # init client
                        self.client = self.init_client(self._client_config,
                                                   self._server_endpoints)
                        # user defined
                        self.init_op()
                        self._succ_init_op = True
                        self._succ_close_op = False
            else:
                self.concurrency_idx = concurrency_idx
                # init client
                self.client = self.init_client(self._client_config,
                                           self._server_endpoints)
                # user defined
                self.init_op() 
        
        init_helper(self, is_thread_op, concurrency_idx)
        print("[OP Object] init success")
        # use a separate TimeProfiler per thread or process
        profiler = TimeProfiler()
        profiler.enable(True)
        return profiler
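
        # Hedged usage note: _run() brackets each stage with paired records,
        # e.g. start = profiler.record("prep#[op|idx]_0") before preprocess
        # and end = profiler.record("prep#[op|idx]_1") after it, then
        # collects the result with profiler.gen_profile_str().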

    def _finalize(self, is_thread_op):
        if is_thread_op:
            with self._for_close_op_lock:
                if not self._succ_close_op:
                    self._profiler = None
                    self.client = None
                    self._succ_init_op = False
                    self._succ_close_op = True

    def _log(self, info):
        return "{} {}".format(self.name, info)


class RequestOp(Op):
    """
    RequestOp is a special Op, for unpacking one request package. If the
    request needs one special unpackaging method, you need to inherit class
    RequestOp and rewrite function unpack_request_package.Notice!!! Class
    RequestOp does not run preprocess, process, postprocess.
    """

    def __init__(self):
        """
        Initialize the RequestOp
        """
        # PipelineService.name = "@DAGExecutor"
        super(RequestOp, self).__init__(name="@DAGExecutor", input_ops=[])
        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical("Op(Request) Failed to init: {}".format(e))
            os._exit(-1)

    def proto_tensor_2_numpy(self, tensor):
        """
        Convert proto tensor to numpy array, The supported types are as follows:
                INT64
                FP32
		INT32
		FP64
		INT16
		FP16
		BF16
		UINT8
		INT8
		BOOL
1662
                BYTES
1663
        Unsupported type:
1664
                STRING
1665 1666 1667 1668 1669 1670 1671
                COMPLEX64
                COMPLEX128

        Args:
            tensor: one tensor in request.tensors.

        Returns:
T
TeslaZhao 已提交
1672 1673
            np_data: np.ndnumpy, the tensor data is converted to numpy.
            lod_info: np.ndnumpy, lod info of the tensor data, None default.
1674 1675 1676 1677 1678 1679
        """
        if tensor is None or tensor.elem_type is None or tensor.name is None:
            _LOGGER.error("input params of tensor are wrong. tensor: {}".format(
                tensor))
            return None, None

        # Set dim shape
        dims = []
        if tensor.shape is None:
            dims.append(1)
        else:
            for one_dim in tensor.shape:
                dims.append(one_dim)

        # Set up 2-d lod tensor
        np_lod = None
        if len(tensor.lod) > 0:
            np_lod = np.array(tensor.lod).astype(int32).reshape(2, -1)

        np_data = None
        _LOGGER.info("proto_to_numpy, name:{}, type:{}, dims:{}".format(
            tensor.name, tensor.elem_type, dims))
        if tensor.elem_type == 0:
            # VarType: INT64
            np_data = np.array(tensor.int64_data).astype(int64).reshape(dims)
        elif tensor.elem_type == 1:
            # VarType: FP32
            np_data = np.array(tensor.float_data).astype(float32).reshape(dims)
        elif tensor.elem_type == 2:
            # VarType: INT32
            np_data = np.array(tensor.int_data).astype(int32).reshape(dims)
        elif tensor.elem_type == 3:
            # VarType: FP64
            np_data = np.array(tensor.float64_data).astype(float64).reshape(
                dims)
        elif tensor.elem_type == 4:
            # VarType: INT16
            np_data = np.array(tensor.int_data).astype(int16).reshape(dims)
        elif tensor.elem_type == 5:
            # VarType: FP16
            np_data = np.array(tensor.float_data).astype(float16).reshape(dims)
        elif tensor.elem_type == 6:
            # VarType: BF16
            np_data = np.array(tensor.uint32_data).astype(uint16).reshape(dims)
        elif tensor.elem_type == 7:
            # VarType: UINT8
            np_data = np.array(tensor.uint32_data).astype(uint8).reshape(dims)
        elif tensor.elem_type == 8:
            # VarType: INT8
            np_data = np.array(tensor.int_data).astype(int8).reshape(dims)
        elif tensor.elem_type == 9:
            # VarType: BOOL
            np_data = np.array(tensor.bool_data).astype(bool).reshape(dims)
        elif tensor.elem_type == 13:
            # VarType: BYTES
            byte_data = BytesIO(tensor.byte_data)
            np_data = np.load(byte_data, allow_pickle=True)
        else:
            _LOGGER.error("Sorry, the type {} of tensor {} is not supported.".
                          format(tensor.elem_type, tensor.name))
            raise ValueError(
                "Sorry, the type {} of tensor {} is not supported.".format(
                    tensor.elem_type, tensor.name))

        return np_data, np_lod

    def unpack_request_package(self, request):
        """
        Unpack request package by gateway.proto
        Args:
            request: HTTP body, JSON format

        Returns:
            dict_data: json fields in HTTP body
            log_id: log_id
            prod_errcode: None or ProductErrCode.SUCC.value default, otherwise,
                          product errores occured.It is handled in the same way
                          as exception.
            prod_errinfo: "" default 
        """
        dict_data = {}
        log_id = None
        if request is None:
            _LOGGER.critical("request is None")
            raise ValueError("request is None")

        # unpack key/value string list
        for idx, key in enumerate(request.key):
            dict_data[key] = request.value[idx]
        log_id = request.logid

        # unpack proto.tensors data.
        for one_tensor in request.tensors:
            name = one_tensor.name
            elem_type = one_tensor.elem_type

            if one_tensor.name is None:
                _LOGGER.error("Tensor name is None.")
                raise ValueError("Tensor name is None.")

            numpy_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE.get(elem_type)
            if numpy_dtype is None:
                _LOGGER.error(
                    "elem_type:{} is mismatched in unpack_request_package.".
                    format(elem_type))
                raise ValueError("elem_type:{} error".format(elem_type))

            if numpy_dtype == "string":
                new_string = ""
                if one_tensor.str_data is None:
                    _LOGGER.error(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                    raise ValueError(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                for one_str in one_tensor.str_data:
                    new_string += one_str

                dict_data[name] = new_string
            else:
                np_data, np_lod = self.proto_tensor_2_numpy(one_tensor)
                dict_data[name] = np_data
                if np_lod is not None:
                    dict_data[name + ".lod"] = np_lod

        _LOGGER.info("RequestOp unpack one request. log_id:{}, clientip:{} \
            name:{}, method:{}, time:{}"
                     .format(log_id, request.clientip, request.name,
                             request.method, time.time()))

        return dict_data, log_id, None, ""


class ResponseOp(Op):
    """ 
    ResponseOp is a special Op, for packing one response package. If the channeldata 
    needs a special packaging method, you need to inherit class ReponseOp and rewrite
    pack_response_package function. Notice!!! Class ResponseOp does not run preprocess,
    process, postprocess.
    """

    def __init__(self, input_ops):
        """
        Initialize the ResponseOp
        """
        super(ResponseOp, self).__init__(
            name="@DAGExecutor", input_ops=input_ops)

        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical(
                "Op(ResponseOp) Failed to init: {}".format(e), exc_info=True)
            os._exit(-1)

        # init ResponseOp
        self.is_pack_tensor = False

    def set_pack_format(self, isTensor=False):
        self.is_pack_tensor = isTensor

    def pack_response_package(self, channeldata):
        """
1839 1840 1841 1842 1843 1844 1845 1846
        Getting channeldata from the last channel, packting the response 
        package serialized by protobuf.  

        Args:
            channeldata: Type ChannelData

        Returns:
            resp: pipeline_service_pb2.Response()
T
TeslaZhao 已提交
1847
        """
        resp = pipeline_service_pb2.Response()
        error_code = channeldata.error_code
        error_info = ""
        if error_code == ChannelDataErrcode.OK.value:
            # Framework level errors
            if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
                feed = channeldata.parse()
                # ndarray to string:
                # https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                np.set_printoptions(threshold=sys.maxsize)
                for name, var in feed.items():
                    resp.value.append(var.__repr__())
                    resp.key.append(name)
            elif channeldata.datatype == ChannelDataType.DICT.value:
                feed = channeldata.parse()
                for name, var in feed.items():
                    if not isinstance(var, str):
                        error_code = ChannelDataErrcode.TYPE_ERROR.value
                        error_info = self._log(
                            "fetch var type must be str({}).".format(
                                type(var)))
                        _LOGGER.error("(logid={}) Failed to pack RPC "
                                      "response package: {}".format(
                                          channeldata.id, error_info))
                        break
                    resp.value.append(var)
                    resp.key.append(name)
            else:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = self._log("error type({}) in datatype.".format(
                    channeldata.datatype))
                _LOGGER.error("(logid={}) Failed to pack RPC response"
                              " package: {}".format(channeldata.id, error_info))
        else:
            # Product level errors
            error_info = channeldata.error_info
            if error_code == ChannelDataErrcode.PRODUCT_ERROR.value:
                # rewrite error_code when product errors occurred
                error_code = channeldata.prod_error_code
                error_info = channeldata.prod_error_info

        # pack results
        if error_code is None:
            error_code = 0
        resp.err_no = error_code
        resp.err_msg = error_info

        return resp


class VirtualOp(Op):
    """ 
    To connect 2 ops across levels in dag view, we create virtual ops
    between non-virtual ops, and transfer data only. For examples, 
    the pred ops of F are D & E.In the process of building DAG, we will
    create channels layer by layer according to dag views.Op F is not 
    in the next layer view of [B, E], so we will create a virtual OP 
    'V1' whose pred OP is E. And so on, we create two virtual op 'V2'
    and 'V3', Finally, we find the non-virtual op F. we create 4 channels
    among E, V1, V2, V3 and F, the producer of V1, V2, V3 and F is E.
    
        DAG: [A -> B -> C -> D -> F]
               \-> E ----------/

        DAG view: [[A], [B, E], [C], [D], [F]]
        BUILD DAG: [A -> B -> C -> D -> E -> F]
                     \-> E -> V1-> V2-> V3/
    """

    def __init__(self, name, concurrency=1):
        super(VirtualOp, self).__init__(
            name=name, input_ops=None, concurrency=concurrency)
        self._virtual_pred_ops = []

    def add_virtual_pred_op(self, op):
        """
        Add the front op of current vritual op.
        
        Args:
            op: one op object, may be a virtual op or not.

        Returns:
            None
        """
        self._virtual_pred_ops.append(op)

    def _actual_pred_op_names(self, op):
        """
        Recursively find the front op which is a non-virtual op.
   
        Args:
            op: one op object
            
        Returns:
            names: the name of non-virtual pred ops.
        """
        # can use disjoint-set, but it's not necessary
        if not isinstance(op, VirtualOp):
            return [op.name]
        names = []
        for x in op._virtual_pred_ops:
            names.extend(self._actual_pred_op_names(x))
        return names

    def add_output_channel(self, channel):
        """
        Adding the output channel of non-virtual pred ops.

        Args:
            channel: one channel.
          
        Returns:
            None.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output_channel"
                          " must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        for op in self._virtual_pred_ops:
            for op_name in self._actual_pred_op_names(op):
                channel.add_producer(op_name)
        self._outputs.append(channel)

    def _run(self, concurrency_idx, input_channel, output_channels, client_type,
             is_thread_op):
        """
        The target function _run() only transfers data between OPs in one thread
        or process.

        Args:
            concurrency_idx: process id, not avaliable in thread mode.
            input_channel: input channel
            output_channels: output channels
            client_type: no use
            is_thread_op: True, thread mode; False, process mode

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=1,
            timeout=None,
            op_info_prefix=op_info_prefix)

        while True:
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break

            try:
                for channeldata_dict in channeldata_dict_batch:
                    for name, data in channeldata_dict.items():
                        self._push_to_output_channels(
                            data, channels=output_channels, name=name)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break