#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from time import time as _time
import time
import threading
import multiprocessing
from paddle_serving_client import Client
from concurrent import futures
import logging
import func_timeout
import os
import sys
import collections
import numpy as np
import json
from numpy import *
from io import BytesIO
if sys.version_info.major == 2:
    import Queue
elif sys.version_info.major == 3:
    import queue as Queue
else:
    raise Exception("Unsupported Python version")

from .error_catch import ErrorCatch, CustomException, CustomExceptionCode
from .proto import pipeline_service_pb2
from .channel import (ThreadChannel, ProcessChannel, ChannelDataErrcode,
                      ChannelData, ChannelDataType, ChannelStopError,
                      ChannelTimeoutError, ProductErrCode)
from .util import NameGenerator
from .profiler import UnsafeTimeProfiler as TimeProfiler
from . import local_service_handler
from .pipeline_client import PipelineClient as PPClient

_LOGGER = logging.getLogger(__name__)
_op_name_gen = NameGenerator("Op")

# mapping from tensor dtype to numpy dtype
_TENSOR_DTYPE_2_NUMPY_DATA_DTYPE = {
    0: "int64",  # VarType.INT64
    1: "float32",  # VarType.FP32
    2: "int32",  # VarType.INT32
    3: "float64",  # VarType.FP64
    4: "int16",  # VarType.INT16
    5: "float16",  # VarType.FP16
    6: "uint16",  # VarType.BF16
    7: "uint8",  # VarType.UINT8
    8: "int8",  # VarType.INT8
    9: "bool",  # VarType.BOOL
    10: "complex64",  # VarType.COMPLEX64
    11: "complex128",  # VarType.COMPLEX128
    12: "string",  # load by numpy
    13: "bytes",  # load by numpy
}
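
# Illustrative use of the mapping above (the `elem_type`/`data` field names
# follow the pipeline proto's Tensor message and are assumptions here):
#   np_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE[tensor.elem_type]
#   array = np.frombuffer(tensor.data, dtype=np_dtype)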


class Op(object):
    def __init__(self,
                 name=None,
                 input_ops=None,
                 server_endpoints=None,
                 fetch_list=None,
                 client_config=None,
                 client_type=None,
                 concurrency=None,
                 timeout=None,
                 retry=0,
                 batch_size=None,
                 auto_batching_timeout=None,
                 local_service_handler=None,
                 jump_to_ops=None):
        # In __init__, all the parameters are just saved and the Op is not initialized
        if name is None:
            name = _op_name_gen.next()
        self.name = name  # identifies the type of OP; it must be globally unique
        self.concurrency = concurrency  # number of concurrent workers
        self.set_input_ops(input_ops)
        self.set_jump_to_ops(jump_to_ops)

        self._local_service_handler = local_service_handler
        self._server_endpoints = server_endpoints
        self._fetch_names = fetch_list
        self._client_config = client_config
        self.client_type = client_type
        self._timeout = timeout
        self._retry = max(1, retry)
        self._batch_size = batch_size
        self._auto_batching_timeout = auto_batching_timeout

        self._input = None
        self._outputs = []

        self._server_use_profile = False
        self._tracer = None

        # for grpc_pipeline predict mode: False means string key/val format,
        # True means tensor format.
        self._pack_tensor_format = False

        # only for thread op
        self._for_init_op_lock = threading.Lock()
        self._for_close_op_lock = threading.Lock()
        self._succ_init_op = False
        self._succ_close_op = False

    def init_from_dict(self, conf):
        """
        Initialize one Op from config.yaml. If server_endpoints exists,
        it is the remote RPC mode; otherwise it is the local RPC mode.
        There are three types of predictors in local RPC mode: brpc,
        grpc and local_predictor.

        Args:
            conf: config.yaml

        Returns:
            None
        """
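        # Illustrative shape of `conf` (keys as read below; values are
        # placeholders, the model path is hypothetical):
        #   conf = {"concurrency": 1, "retry": 1, "timeout": -1,
        #           "batch_size": 1, "auto_batching_timeout": -1,
        #           "local_service_conf": {"model_config": "./my_model",
        #                                  "client_type": "local_predictor"}}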
        if self.concurrency is None:
            self.concurrency = conf["concurrency"]
        if self._retry is None:
            self._retry = conf["retry"]
        if self._fetch_names is None:
            self._fetch_names = conf.get("fetch_list")
        if self._client_config is None:
            self._client_config = conf.get("client_config")

        if self._timeout is None:
            self._timeout = conf["timeout"]
        if self._timeout > 0:
            self._timeout = self._timeout / 1000.0
        else:
            self._timeout = -1

        if self._batch_size is None:
            self._batch_size = conf["batch_size"]
        if self._auto_batching_timeout is None:
            self._auto_batching_timeout = conf["auto_batching_timeout"]
        if self._auto_batching_timeout <= 0 or self._batch_size == 1:
            _LOGGER.debug(
                self._log(
                    "Because auto_batching_timeout <= 0 or batch_size == 1,"
                    " set auto_batching_timeout to None."))
            self._auto_batching_timeout = None
        else:
            self._auto_batching_timeout = self._auto_batching_timeout / 1000.0

        self.model_config = None
        self.workdir = None
        self.thread_num = self.concurrency
        self.device_type = -1
        self.devices = ""
        self.mem_optim = False
        self.ir_optim = False
        self.precision = "fp32"
        self.use_mkldnn = False
        self.mkldnn_cache_capacity = 0
        self.mkldnn_op_list = None
        self.mkldnn_bf16_op_list = None

        if self._server_endpoints is None:
            server_endpoints = conf.get("server_endpoints", [])
            if len(server_endpoints) != 0:
                # remote service
                self.with_serving = True
                self._server_endpoints = server_endpoints
                self.client_type = conf["client_type"]
            else:
                if self._local_service_handler is None:
                    local_service_conf = conf.get("local_service_conf")
                    _LOGGER.info("local_service_conf: {}".format(
                        local_service_conf))
                    self.model_config = local_service_conf.get("model_config")
                    self.client_type = local_service_conf.get("client_type")
                    self.workdir = local_service_conf.get("workdir")
                    self.thread_num = local_service_conf.get("thread_num")
                    self.device_type = local_service_conf.get("device_type")
                    self.devices = local_service_conf.get("devices")
                    self.mem_optim = local_service_conf.get("mem_optim")
                    self.ir_optim = local_service_conf.get("ir_optim")
                    self._fetch_names = local_service_conf.get("fetch_list")
                    self.precision = local_service_conf.get("precision")
                    self.use_mkldnn = local_service_conf.get("use_mkldnn")
                    self.mkldnn_cache_capacity = local_service_conf.get(
                        "mkldnn_cache_capacity")
                    self.mkldnn_op_list = local_service_conf.get(
                        "mkldnn_op_list")
                    self.mkldnn_bf16_op_list = local_service_conf.get(
                        "mkldnn_bf16_op_list")

                    if self.model_config is None:
                        self.with_serving = False
                    else:
                        # local rpc service
                        self.with_serving = True
                        if self.client_type in ("brpc", "grpc"):
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list)
                            service_handler.prepare_server()  # get fetch_list
                            service_ports = service_handler.get_port_list()
                            self._server_endpoints = [
                                "127.0.0.1:{}".format(p) for p in service_ports
                            ]
                            if self._client_config is None:
                                self._client_config = \
                                    service_handler.get_client_config()
                            if self._fetch_names is None:
                                self._fetch_names = \
                                    service_handler.get_fetch_list()
                        elif self.client_type == "local_predictor":
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                fetch_names=self._fetch_names,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list)
                            if self._client_config is None:
                                self._client_config = \
                                    service_handler.get_client_config()
                        self._local_service_handler = service_handler
                else:
                    self.with_serving = True
                    self._local_service_handler.prepare_server(
                    )  # get fetch_list
                    service_ports = self._local_service_handler.get_port_list()
                    self._server_endpoints = [
                        "127.0.0.1:{}".format(p) for p in service_ports
                    ]
                    if self._client_config is None:
                        self._client_config = \
                            self._local_service_handler.get_client_config()
                    if self._fetch_names is None:
                        self._fetch_names = \
                            self._local_service_handler.get_fetch_list()
        else:
            self.with_serving = True

        if not isinstance(self, RequestOp) and not isinstance(self, ResponseOp):
            _LOGGER.info(
                self._log("\n\tinput_ops: {},"
                          "\n\tserver_endpoints: {},"
                          "\n\tfetch_list: {},"
                          "\n\tclient_config: {},"
                          "\n\tconcurrency: {},"
                          "\n\ttimeout(s): {},"
                          "\n\tretry: {},"
                          "\n\tbatch_size: {},"
                          "\n\tauto_batching_timeout(s): {}".format(
                              ", ".join([op.name for op in self._input_ops
                                         ]), self._server_endpoints,
                              self._fetch_names, self._client_config,
                              self.concurrency, self._timeout, self._retry,
                              self._batch_size, self._auto_batching_timeout)))

    def launch_local_rpc_service(self):
        """
        Launch multiple local rpc servers.

        Args:
            None

        Returns:
            None
        """
        if self._local_service_handler is None:
            _LOGGER.warning(
                self._log("Failed to launch local rpc"
                          " service: local_service_handler is None."))
            return
        port = self._local_service_handler.get_port_list()
        #if self._local_service_handler.client_type == "local_predictor":
        #    _LOGGER.info("Op({}) use local predictor.")
        #    return
        self._local_service_handler.start_server()
        _LOGGER.info("Op({}) use local rpc service at port: {}"
                     .format(self.name, port))

    def use_default_auto_batching_config(self):
        """
        Reset the auto-batching config to its defaults.

        Args:
            None

        Returns:
            None
        """
        if self._batch_size != 1:
            _LOGGER.warning("Op({}) reset batch_size=1 (original: {})"
                            .format(self.name, self._batch_size))
            self._batch_size = 1
        if self._auto_batching_timeout is not None:
            _LOGGER.warning(
                "Op({}) reset auto_batching_timeout=None (original: {})"
                .format(self.name, self._auto_batching_timeout))
            self._auto_batching_timeout = None

    def use_profiler(self, use_profile):
        self._server_use_profile = use_profile

    def set_tracer(self, tracer):
        self._tracer = tracer

    def init_client(self, client_config, server_endpoints):
        """
        Initialize the client object. There are three types of clients: brpc,
        grpc and local_predictor. In grpc or brpc mode, the client connects to
        endpoints.

        Args:
            client_config: client config info
            server_endpoints: server IP/Port list.

        Returns:
            client: client object.
        """
        if self.with_serving is False:
            _LOGGER.info("Op({}) has no client (and it also does not "
                         "run the process function)".format(self.name))
            return None
        if self.client_type == 'brpc':
            client = Client()
            client.load_client_config(client_config)
        elif self.client_type == 'pipeline_grpc':
            client = PPClient()
        elif self.client_type == 'local_predictor':
            if self.local_predictor is None:
                raise ValueError("local predictor not yet created")
            client = self.local_predictor
        else:
            raise ValueError("Failed to init client: unknown client "
                             "type {}".format(self.client_type))
        if self._fetch_names is None:
            self._fetch_names = client.fetch_names_
            _LOGGER.info("Op({}) has no fetch name set. So fetch all vars"
                         .format(self.name))
        if self.client_type != "local_predictor":
            client.connect(server_endpoints)
        return client

    def get_input_ops(self):
        return self._input_ops

    def set_input_ops(self, ops):
        """
        Set input ops. Each op may have many input ops, but only one input
        channel.

        Args:
            ops: op list

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]
        self._input_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set input_ops: input op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._input_ops.append(op)

    def set_pack_tensor_format(self, is_tensor_format=False):
        self._pack_tensor_format = is_tensor_format

    def get_jump_to_ops(self):
        return self._jump_to_ops

    def set_jump_to_ops(self, ops):
        """
        Set jump to ops, then, this op can send channeldata to output channel.

        Args:
            ops: op list to be jumpped

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]

        self._jump_to_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set jump_to_ops: jump op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._jump_to_ops.append(op)

    def is_jump_op(self):
        """
        Whether the op has _jump_to_ops members.

        Args:
            None

        Returns:
            True or False
        """
        return len(self._jump_to_ops) > 0

    def check_jumping(self, input_data):
        """
        Check whether to send data to jump ops. WhileOp needs to override
        this interface. This function returns False by default.

        Args:
            input_data: input data to be preprocessed

        Returns:
            True, send data to the output channel of jump ops
            False, send data to output channel.
        """
        return False
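
    # A minimal WhileOp-style override sketch (the "need_loop" field is
    # hypothetical): keep jumping while a condition in the data holds.
    #
    #   def check_jumping(self, input_data):
    #       return bool(input_data.get("need_loop"))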

    def get_output_channels_of_jump_ops(self):
        """
        Get output channels of jump ops

        Args:
            None

        Returns:
            list of channels
        """
        channels = []
        if self.is_jump_op() is False:
            return channels
        for op in self._jump_to_ops:
            _LOGGER.info("op:{} extend op._get_output_channels:{}".format(
                op.name, op._get_output_channels()))
            channels.extend(op._get_output_channels())

        _LOGGER.info("get_output_channels_of_jump_ops, channels:{}".format(
            channels))
        return channels

    def add_input_channel(self, channel):
        """
        Add one input channel to the Op. Each op may have many front ops,
        but only one input channel.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to set input_channel: input "
                          "channel must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        channel.add_consumer(self.name)
        self._input = channel

    def clean_input_channel(self):
        self._input = None

    def _get_input_channel(self):
        return self._input

    def add_output_channel(self, channel):
        """
        Add one output channel to the Op. Each op may have many output
        channels, but only one front channel.

        Args:
            channel: an output channel object.

        Returns:
            None
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output channel "
                          "must be Channel type, not {}".format(type(channel))))
            os._exit(-1)
        channel.add_producer(self.name)
        self._outputs.append(channel)
        _LOGGER.debug("op:{} add output_channel {}".format(self.name, channel))

    def clean_output_channels(self):
        self._outputs = []

    def _get_output_channels(self):
        return self._outputs

    def preprocess(self, input_dicts, data_id=0, log_id=0):
        """
        In the preprocess stage, assemble data for the process stage. Users
        can override this function for model feed features.

        Args:
            input_dicts: input data to be preprocessed
            data_id: inner unique id, auto-increasing
            log_id: global unique id for RTT, 0 default

        Returns:
            output_data: data for process stage
            is_skip_process: skip process stage or not, False default
            prod_errcode: None default, otherwise product errors occurred.
                          It is handled in the same way as exceptions.
            prod_errinfo: "" default
        """
        # multiple previous Ops
        if len(input_dicts) != 1:
            _LOGGER.critical(
                self._log(
                    "Failed to run preprocess: this Op has multiple previous "
                    "inputs. Please override this func."))
            os._exit(-1)

        (_, input_dict), = input_dicts.items()
        return input_dict, False, None, ""
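
    # A minimal override sketch for a custom Op subclass (the "image" key is
    # hypothetical): build the model feed from the request dict.
    #
    #   def preprocess(self, input_dicts, data_id=0, log_id=0):
    #       (_, input_dict), = input_dicts.items()
    #       img = np.frombuffer(input_dict["image"], dtype="uint8")
    #       return {"image": img}, False, None, ""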
B
barrierye 已提交
547

    def process(self, feed_batch, typical_logid=0):
        """
        In the process stage, send requests to the inference server or
        predict locally. Users do not need to override this function.

        Args:
            feed_batch: data to be fed to the inference server
            typical_logid: marks batch predicts, usually the first logid in
                the batch, 0 default.

        Returns:
            call_result: predict result
        """
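        # Illustrative feed_batch shapes per client_type (keys/values are
        # placeholders):
        #   local_predictor / brpc: [{"x": np.ndarray(...)}]  (np data)
        #   pipeline_grpc:          [{"key": "value"}]        (dict data)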

        call_result = None
        err_code = ChannelDataErrcode.OK.value
        err_info = ""

        if self.client_type == "local_predictor":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for local_predictor mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"

            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "brpc":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for brpc mode.".format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"
            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "pipeline_grpc":
            err, err_info = ChannelData.check_dictdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                       dict data in process for pipeline_grpc mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be dict"

            call_result = self.client.predict(
                feed_dict=feed_batch[0],
                fetch=self._fetch_names,
                asyn=False,
                pack_tensor_format=self._pack_tensor_format,
                profile=False)
            if call_result is None:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. call_result is None."))
                return call_result, ChannelDataErrcode.UNKNOW.value, "pipeline_grpc error"
            if call_result.err_no != 0:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. err_no:{}, err_info:{}".
                              format(call_result.err_no, call_result.err_msg)))
                return call_result, ChannelDataErrcode(
                    call_result.err_no).value, call_result.err_msg

            new_dict = {}
            err_code = ChannelDataErrcode(call_result.err_no).value
            err_info = call_result.err_msg
            for idx, key in enumerate(call_result.key):
                new_dict[key] = [call_result.value[idx]]
            call_result = new_dict

        return call_result, err_code, err_info

    def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
        """
        In the postprocess stage, assemble data for the next op or output.

        Args:
            input_data: data returned in preprocess stage, dict (for single
                predict) or list (for batch predict)
            fetch_data: data returned in process stage, dict (for single
                predict) or list (for batch predict)
            data_id: inner unique id, auto-increasing
            log_id: logid, 0 default

        Returns:
            fetch_dict: fetch result, must be dict type.
            prod_errcode: None default, otherwise product errors occurred.
                          It is handled in the same way as exceptions.
            prod_errinfo: "" default
        """
        fetch_dict = {}
        if isinstance(fetch_data, dict):
            fetch_dict = fetch_data
        return fetch_dict, None, ""
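
    # A minimal override sketch (the fetch var name is hypothetical):
    # select and rename fetch vars for the next op or the response.
    #
    #   def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
    #       score = fetch_data["softmax_0.tmp_0"]
    #       return {"score": score}, None, ""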

    def _parse_channeldata(self, channeldata_dict):
        """
        Parse one channeldata.

        Args:
            channeldata_dict: channel data to be parsed, dict type

        Returns:
            data_id: created by dag._id_generator, unique
            error_channeldata: error channeldata
            parsed_data: np/dict data extracted from channeldata
            client_need_profile: need profile info
            profile_set: profile info
            log_id: logid for tracing a request
        """
        data_id, error_channeldata = None, None
        client_need_profile, profile_set = False, set()
        parsed_data = {}

        key = list(channeldata_dict.keys())[0]
        data_id = channeldata_dict[key].id
        log_id = channeldata_dict[key].log_id
        client_need_profile = channeldata_dict[key].client_need_profile

        for name, data in channeldata_dict.items():
            if data.error_code != ChannelDataErrcode.OK.value:
                error_channeldata = data
                break
            parsed_data[name] = data.parse()
            if client_need_profile:
                profile_set |= data.profile_data_set
        return (data_id, error_channeldata, parsed_data, client_need_profile,
                profile_set, log_id)

    def _push_to_output_channels(self,
                                 data,
                                 channels,
                                 name=None,
                                 profile_str=None,
                                 client_need_profile=False,
                                 profile_set=None):
        """
        Push data to output channels; do not run the later stages
        (preprocess, process, postprocess).

        Args:
            data: channeldata, to be pushed
            channels: output channels
            name: op name
            profile_str: one profile message
            client_need_profile: False default
            profile_set: profile message collections

        Returns:
            None
        """
        if name is None:
            name = self.name

        # add profile into channeldata
        if client_need_profile and profile_set is not None:
            if profile_str is not None:
                profile_set.add(profile_str)
            data.add_profile(profile_set)

        for channel in channels:
            channel.push(data, name)

    def start_with_process(self):
        """
        Each OP creates a process to run the main loop, initializes the CUDA
        environment in each individual process.

        Args:
            None

        Returns:
            process array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()
        process = []
        for concurrency_idx in range(self.concurrency):
            p = multiprocessing.Process(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), False, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn,
                      self.mkldnn_cache_capacity, self.mkldnn_op_list,
                      self.mkldnn_bf16_op_list, self.is_jump_op(),
                      self.get_output_channels_of_jump_ops()))
            p.daemon = True
            p.start()
            process.append(p)
        return process

    def start_with_thread(self):
        """
        Each OP creates a thread to run the main loop, initializes the CUDA
        environment in the main thread.

        Args:
            None

        Returns:
            thread array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()

        # Init cuda env in main thread
        if self.client_type == "local_predictor":
            _LOGGER.info("Init cuda env in main thread")
            self.local_predictor = self._local_service_handler.get_client(0)

        threads = []
        for concurrency_idx in range(self.concurrency):
            t = threading.Thread(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), True, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn,
                      self.mkldnn_cache_capacity, self.mkldnn_op_list,
                      self.mkldnn_bf16_op_list, self.is_jump_op(),
                      self.get_output_channels_of_jump_ops()))
            # When a process exits, it attempts to terminate
            # all of its daemonic child processes.
            t.daemon = True
            t.start()
            threads.append(t)
        return threads

    def init_op(self):
        pass

    def _run_preprocess(self, parsed_data_dict, op_info_prefix, logid_dict):
        """
        Run preprocess stage
        Args:
            parsed_data_dict: data to be pre-processed
            op_info_prefix: input op info
            logid_dict: logid dict

        Returns:
            preped_data_dict: data preprocessed, to be processed 
            err_channeldata_dict: when exceptions occurred, putting errors in it.
            skip_process_dict: skip process stage or not

        """
        _LOGGER.debug("{} Running preprocess".format(op_info_prefix))
        preped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        skip_process_dict = {}
        @ErrorCatch
        def preprocess_help(self, parsed_data, data_id, logid_dict):
            preped_data, is_skip_process, prod_errcode, prod_errinfo = self.preprocess(
                parsed_data, data_id, logid_dict.get(data_id))
            return preped_data, is_skip_process, prod_errcode, prod_errinfo
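        # Note: @ErrorCatch wraps the helper so it returns (result, resp);
        # in this module resp.err_no == 200 marks success.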
            
        for data_id, parsed_data in parsed_data_dict.items():
            preped_data, error_channeldata = None, None
            is_skip_process = False
            prod_errcode, prod_errinfo = None, None
            log_id = logid_dict.get(data_id)
            process_res, resp = preprocess_help(self, parsed_data, data_id,
                                                logid_dict)
            if resp.err_no == 200:
                preped_data, is_skip_process, prod_errcode, prod_errinfo = process_res
                if is_skip_process is True:
                    skip_process_dict[data_id] = True
                if prod_errcode is not None:
                    _LOGGER.error(
                        "data_id: {} returned product error. Product ErrNo: {},"
                        " Product ErrMsg: {}".format(data_id, prod_errcode,
                                                     prod_errinfo))
                    error_channeldata = ChannelData(
                        error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                        error_info="",
                        prod_error_code=prod_errcode,
                        prod_error_info=prod_errinfo,
                        data_id=data_id,
                        log_id=log_id)
            else:
                error_channeldata = ChannelData(
                    error_code=resp.err_no,
                    error_info=resp.err_msg,
                    data_id=data_id,
                    log_id=log_id)
                skip_process_dict[data_id] = True

            if error_channeldata is not None:
                err_channeldata_dict[data_id] = error_channeldata
            else:
                preped_data_dict[data_id] = preped_data
        _LOGGER.debug("{} Succ preprocess".format(op_info_prefix))
        return preped_data_dict, err_channeldata_dict, skip_process_dict

    def _run_process(self, preped_data_dict, op_info_prefix, skip_process_dict,
                     logid_dict):
        """
        Run process stage
        Args:
            preped_data_dict: feed the data to be predicted by the model.  
            op_info_prefix: prefix op info
            skip_process_dict: skip process stage or not
            logid_dict: logid dict

        Returns:
            midped_data_dict: data midprocessed, to be post-processed 
            err_channeldata_dict: when exceptions occurred, putting errors in it 
        """
        _LOGGER.debug("{} Running process".format(op_info_prefix))
        midped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        is_skip_process = False
        data_ids = list(preped_data_dict.keys())

        # skip process stage
        if len(data_ids) == 1 and skip_process_dict.get(data_ids[0]) is True:
            is_skip_process = True
        if self.with_serving is False or is_skip_process is True:
            midped_data_dict = preped_data_dict
            _LOGGER.warning("(data_id={} log_id={}) OP={} skip process stage. " \
                "with_serving={}, is_skip_process={}".format(data_ids[0],
                logid_dict.get(data_ids[0]), self.name, self.with_serving,
                is_skip_process))
            return midped_data_dict, err_channeldata_dict

        # use typical_logid to mark batch data
        # data_id is a self-increasing unique key.
        typical_logid = data_ids[0]
        if len(data_ids) != 1:
            for data_id in data_ids:
                _LOGGER.info(
                    "(data_id={} logid={}) Auto-batching is on, Op={}! " \
                    "We selected logid={} (from batch: {}) as a " \
                    "representative for logging.".format(
                    data_id, logid_dict.get(data_id), self.name,
                    typical_logid, data_ids))

        one_input = preped_data_dict[data_ids[0]]
        feed_batch = []
        feed_dict = {}
        cur_offset = 0
        input_offset_dict = {}
        batch_input = False

        if isinstance(one_input, dict):
            # For dict type, data structure is dict.
            # Merge multiple dicts for data_ids into one dict.
            # feed_batch is the input param of the predict func.
            # input_offset_dict is used for data restoration by data_id.
            if len(data_ids) == 1:
                feed_batch = [preped_data_dict[data_id] for data_id in data_ids]
            else:
                for data_id in data_ids:
                    for key, val in preped_data_dict[data_id].items():
                        has_val = feed_dict.get(key)
                        if has_val is None:
                            feed_dict[key] = val
                            continue
                        # merge two np.ndarrays
                        if isinstance(val, np.ndarray):
                            feed_dict[key] = np.append(
                                feed_dict[key], val, axis=0)
                feed_batch.append(feed_dict)

            for data_id in data_ids:
                start = cur_offset
                for key, val in preped_data_dict[data_id].items():
                    if isinstance(val, (list, np.ndarray)):
                        cur_offset += len(val)
                    else:
                        cur_offset += 1
                    break
                input_offset_dict[data_id] = [start, cur_offset]
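            # Illustrative example: with two requests whose first tensors
            # have lengths 2 and 3, input_offset_dict becomes
            # {id1: [0, 2], id2: [2, 5]}, which is used below to split the
            # merged prediction back per request.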
        elif isinstance(one_input, list):
            # For list type, data structure of one_input is [dict, dict, ...]
            # Data structure of feed_batch is [dict1_1, dict1_2, dict2_1, ...]   
            # Data structure of input_offset_dict is { data_id : [start, end] }
            batch_input = True
            for data_id in data_ids:
                feed_batch.extend(preped_data_dict[data_id])
                data_size = len(preped_data_dict[data_id])
                start = cur_offset
                cur_offset = start + data_size
                input_offset_dict[data_id] = [start, cur_offset]
        else:
            _LOGGER.critical(
                "(data_id={} log_id={}){} Failed to process: expect input type is dict"
                " or list(batch input), but get {}".format(data_ids[
                    0], typical_logid, op_info_prefix, type(one_input)))
            for data_id in data_ids:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = "expect input type is dict or list, but get {}".format(
                    type(one_input))
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=error_code,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        midped_batch = None
        error_code = ChannelDataErrcode.OK.value
        error_info = ""
        if self._timeout <= 0:
            # No retry
            try:
                if batch_input is False:
                    midped_batch, error_code, error_info = self.process(
                        feed_batch, typical_logid)
                else:
                    midped_batch = []
                    for idx in range(len(feed_batch)):
                        predict_res, error_code, error_info = self.process(
                            [feed_batch[idx]], typical_logid)
                        if error_code != ChannelDataErrcode.OK.value:
                            break
                        midped_batch.append(predict_res)
            except Exception as e:
                error_code = ChannelDataErrcode.UNKNOW.value
                error_info = "(data_id={} log_id={}) {} Failed to process(batch: {}): {}".format(
                    data_ids[0], typical_logid, op_info_prefix, data_ids, e)
                _LOGGER.error(error_info, exc_info=True)
        else:
            # retry N times as configured in yaml files.
            for i in range(self._retry):
                try:
                    # time out for each process
                    if batch_input is False:
                        midped_batch, error_code, error_info = func_timeout.func_timeout(
                            self._timeout,
                            self.process,
                            args=(feed_batch, typical_logid))
                    else:
                        midped_batch = []
                        for idx in range(len(feed_batch)):
                            predict_res, error_code, error_info = func_timeout.func_timeout(
                                self._timeout,
                                self.process,
                                args=([feed_batch[idx]], typical_logid))
                            midped_batch.append(predict_res)

                except func_timeout.FunctionTimedOut as e:
                    if i + 1 >= self._retry:
                        error_code = ChannelDataErrcode.TIMEOUT.value
                        error_info = "(log_id={}) {} Failed to process(batch: {}): " \
                            "exceeded retry count.".format(typical_logid, op_info_prefix, data_ids)
                        _LOGGER.error(error_info)
                    else:
                        _LOGGER.warning(
                            "(log_id={}) {} Failed to process(batch: {}): timeout,"
                            " and retrying({}/{})...".format(
                                typical_logid, op_info_prefix, data_ids, i + 1,
                                self._retry))
                except Exception as e:
                    error_code = ChannelDataErrcode.UNKNOW.value
                    error_info = "(log_id={}) {} Failed to process(batch: {}): {}".format(
                        typical_logid, op_info_prefix, data_ids, e)
                    _LOGGER.error(error_info, exc_info=True)
                    break
                else:
                    break

        # two kinds of errors: a process error or an empty result
        if error_code != ChannelDataErrcode.OK.value or midped_batch is None:
            error_info = "(log_id={}) {} failed to predict. Please check the input dict and checkout PipelineServingLogs/pipeline.log for more details.".format(
                typical_logid, self.name)
            _LOGGER.error(error_info)
            for data_id in data_ids:
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=ChannelDataErrcode.CLIENT_ERROR.value,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        # Split batch infer result to each data_ids
        if batch_input is False:
            var_names = midped_batch.keys()
            lod_var_names = set()
            lod_offset_names = set()
            # midped_batch is dict type for single input 
            for name in var_names:
                lod_offset_name = "{}.lod".format(name)
                if lod_offset_name in var_names:
                    _LOGGER.debug("(log_id={}) {} {} is LodTensor".format(
                        typical_logid, op_info_prefix, name))
                    lod_var_names.add(name)
                    lod_offset_names.add(lod_offset_name)

            for idx, data_id in enumerate(data_ids):
                midped_data_dict[data_id] = {}

            for name, value in midped_batch.items():
                if name in lod_offset_names:
                    continue
                if name in lod_var_names:
                    # lodtensor
                    lod_offset_name = "{}.lod".format(name)
                    lod_offset = midped_batch[lod_offset_name]
                    for idx, data_id in enumerate(data_ids):
                        data_offset_left = input_offset_dict[data_id][0]
                        data_offset_right = input_offset_dict[data_id][1]
                        lod_offset_left = lod_offset[data_offset_left]
                        lod_offset_right = lod_offset[data_offset_right]
                        midped_data_dict[data_id][name] = value[
                            lod_offset_left:lod_offset_right]
                        midped_data_dict[data_id][lod_offset_name] = \
                            lod_offset[data_offset_left:data_offset_right + 1] - lod_offset[data_offset_left]
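                        # Illustrative example: with lod_offset = [0, 2, 5]
                        # and sample range [0, 1), the value slice is [0:2]
                        # and the per-request lod is rebased to start at 0.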
                else:
                    # normal tensor
                    for idx, data_id in enumerate(data_ids):
                        start = input_offset_dict[data_id][0]
                        end = input_offset_dict[data_id][1]
                        midped_data_dict[data_id][name] = value[start:end]
        else:
            # midped_batch is list type for batch input
            for idx, data_id in enumerate(data_ids):
                start = input_offset_dict[data_id][0]
                end = input_offset_dict[data_id][1]
                midped_data_dict[data_id] = midped_batch[start:end]
        return midped_data_dict, err_channeldata_dict

    def _run_postprocess(self, parsed_data_dict, midped_data_dict,
                         op_info_prefix, logid_dict):
        """
        Run postprocess stage.
        Args:
            parsed_data_dict: data returned in preprocess stage 
            midped_data_dict: data returned in process stage
            op_info_prefix: prefix op info
            logid_dict: logid dict

        Returns:
            postped_data_dict: data postprocessed 
            err_channeldata_dict: when exceptions occurred, putting errors in it
 
        """
        _LOGGER.debug("{} Running postprocess".format(op_info_prefix))
        postped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        @ErrorCatch
        def postprocess_help(self, parsed_data_dict, midped_data, data_id,
                             logid_dict):
            postped_data, prod_errcode, prod_errinfo = self.postprocess(
                parsed_data_dict[data_id], midped_data, data_id,
                logid_dict.get(data_id))
            if not isinstance(postped_data, dict):
                raise CustomException(CustomExceptionCode.TYPE_ERROR,
                                      "postprocess should return dict", True)
            return postped_data, prod_errcode, prod_errinfo

        for data_id, midped_data in midped_data_dict.items():
            log_id = logid_dict.get(data_id)
            postped_data, err_channeldata = None, None
            prod_errcode, prod_errinfo = None, None

            post_res, resp = postprocess_help(self, parsed_data_dict,
                                              midped_data, data_id, logid_dict)
            if resp.err_no == 200:
                postped_data, prod_errcode, prod_errinfo = post_res
                if prod_errcode is not None:
                    # product errors occurred
                    err_channeldata = ChannelData(
                        error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                        error_info="",
                        prod_error_code=prod_errcode,
                        prod_error_info=prod_errinfo,
                        data_id=data_id,
                        log_id=log_id)
            else:
                err_channeldata = ChannelData(
                    error_code=resp.err_no,
                    error_info=resp.err_msg,
                    data_id=data_id,
                    log_id=log_id)

            if err_channeldata is not None:
                err_channeldata_dict[data_id] = err_channeldata
                continue

            output_data = None
            err, _ = ChannelData.check_npdata(postped_data)
            if err == 0:
                output_data = ChannelData(
                    ChannelDataType.CHANNEL_NPDATA.value,
                    npdata=postped_data,
                    data_id=data_id,
                    log_id=log_id)
            else:
                output_data = ChannelData(
                    ChannelDataType.DICT.value,
                    dictdata=postped_data,
                    data_id=data_id,
                    log_id=log_id)
            postped_data_dict[data_id] = output_data
        _LOGGER.debug("{} Succ postprocess".format(op_info_prefix))
        return postped_data_dict, err_channeldata_dict

    def _auto_batching_generator(self, input_channel, op_name, batch_size,
                                 timeout, op_info_prefix):
        """
        Merge batch_size requests for one prediction. Take one piece of data
        from the input channel each time until the batch equals batch_size,
        or the waiting time exceeds auto_batching_timeout.

        Args:
            input_channel: the input channel of the Op
            op_name: op name
            batch_size: batch size, less than worker_num
            timeout: batch timeout (seconds). If timeout is None and the
                quantity taken from the front is less than batch_size,
                blocking occurs.
            op_info_prefix: op link info.

        Returns:
            None
        """
        while True:
            batch = []
            while len(batch) == 0:
                endtime = None
                if timeout is not None:
                    endtime = _time() + timeout
                for idx in range(batch_size):
                    try:
                        channeldata_dict = None
                        front_start_time = int(round(_time() * 1000000))
                        if timeout is not None:
                            remaining = endtime - _time()
                            if remaining <= 0.0:
                                _LOGGER.debug("{} Failed to generate batch: "
                                              "timeout".format(op_info_prefix))
                                break
                            # wait no longer than the remaining batch budget
                            channeldata_dict = input_channel.front(
                                op_name, remaining)
                        else:
                            channeldata_dict = input_channel.front(op_name)
                        batch.append(channeldata_dict)
                        _LOGGER.debug(
                            "_auto_batching_generator get {} channeldata from op:{} input channel. time={}".
                            format(idx, op_name, front_start_time))
                    except ChannelTimeoutError:
                        _LOGGER.debug("{} Failed to generate batch: "
                                      "timeout".format(op_info_prefix))
                        break
            _LOGGER.debug("{} Got actual batch_size: {}".format(op_info_prefix,
                                                                len(batch)))
            yield batch

    def _parse_channeldata_batch(self, batch, output_channels):
        """
        Parse channeldatas batch
        Args:
            batch: auto-batching batch datas
            output_channels: output channels 

        Returns:
            parsed_data_dict: parsed from channeldata in batch
            need_profile_dict: need profile dict in batch 
            profile_dict: profile info dict in batch
            logid_dict: trace each request in batch
        """
        parsed_data_dict = collections.OrderedDict()
        need_profile_dict = {}
        profile_dict = {}
        logid_dict = {}
        for channeldata_dict in batch:
            (data_id, error_channeldata, parsed_data,
                    client_need_profile, profile_set, log_id) = \
                            self._parse_channeldata(channeldata_dict)
            if error_channeldata is None:
                parsed_data_dict[data_id] = parsed_data
                need_profile_dict[data_id] = client_need_profile
                profile_dict[data_id] = profile_set
                logid_dict[data_id] = log_id
            else:
                # error data in predecessor Op
                # (error_channeldata with profile info)
                self._push_to_output_channels(error_channeldata,
                                              output_channels)

        return parsed_data_dict, need_profile_dict, profile_dict, logid_dict

    def _run(self, concurrency_idx, input_channel, output_channels,
             is_thread_op, trace_buffer, model_config, workdir, thread_num,
             device_type, devices, mem_optim, ir_optim, precision, use_mkldnn,
             mkldnn_cache_capacity, mkldnn_op_list, mkldnn_bf16_op_list,
             is_jump_op, output_channels_of_jump_ops):
        """
        _run() is the entry function of OP process / thread model.When client 
        type is local_predictor in process mode, the CUDA environment needs to 
        be initialized by LocalServiceHandler[child process], otherwise, Cuda
        error(3), initialization error is occured. Preprocess, process and 
        postprocess are executed in the main loop. The preprocess and postprocess
        function is usually rewrited by users. Trace data is recorded by trace_que.

        Args:
            concurrency_idx: thread/process index
            input_channel: input channel, take the data to be processed
            output_channels: output channel, store processed data
            is_thread_op: False, It's process op; True, It's thread op
            trace_buffer: store trace infomations
            model_config: model config path
            workdir: work directory
            thread_num: number of threads, concurrent quantity
1252
            device_type: support multiple devices
1253 1254
            devices: gpu id list[gpu], "" default[cpu]
            mem_optim: use memory/graphics memory optimization, True default.
1255
            ir_optim: use calculation chart optimization, False default.
T
TeslaZhao 已提交
1256 1257 1258 1259 1260
            precision: inference precision, e.g. "fp32", "fp16", "int8", "bf16"
            use_mkldnn: use mkldnn, default False.
            mkldnn_cache_capacity: cache capacity of mkldnn, 0 means no limit.
            mkldnn_op_list: OP list optimized by mkldnn, None default.
            mkldnn_bf16_op_list: OP list optimized by mkldnn bf16, None default.
1261 1262
            is_jump_op: OP has jump op list or not, False default.
            output_channels_of_jump_ops: all output channels of jump ops.
1263 1264 1265 1266

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)

        # init op
        profiler = None
        try:
            if not is_thread_op and self.client_type == "local_predictor":
                self.service_handler = local_service_handler.LocalServiceHandler(
                    model_config=model_config,
                    client_type="local_predictor",
                    workdir=workdir,
                    thread_num=thread_num,
                    device_type=device_type,
                    devices=devices,
                    mem_optim=mem_optim,
                    ir_optim=ir_optim,
                    precision=precision,
                    use_mkldnn=use_mkldnn,
                    mkldnn_cache_capacity=mkldnn_cache_capacity,
                    mkldnn_op_list=mkldnn_op_list,
                    mkldnn_bf16_op_list=mkldnn_bf16_op_list)

                _LOGGER.info("Init cuda env in process {}".format(
                    concurrency_idx))
                self.local_predictor = self.service_handler.get_client(
                    concurrency_idx)
            # initialize this op and create its TimeProfiler.
            profiler = self._initialize(is_thread_op, concurrency_idx)
        except Exception as e:
            _LOGGER.critical(
                "{} failed to init op: {}".format(op_info_prefix, e),
                exc_info=True)
            os._exit(-1)
        _LOGGER.info("{} Succ init".format(op_info_prefix))

        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=self._batch_size,
            timeout=self._auto_batching_timeout,
            op_info_prefix=op_info_prefix)

        start, end = None, None
        trace_que = collections.deque()
        while True:
            start = int(round(_time() * 1000000))
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            in_time = end - start
            _LOGGER.debug("op:{} in_time_end:{}".format(op_info_prefix,
                                                        time.time()))

            # parse channeldata batch
            try:
                parsed_data_dict, need_profile_dict, profile_dict, logid_dict\
                        = self._parse_channeldata_batch(
                                channeldata_dict_batch, output_channels)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(parsed_data_dict) == 0:
                # data in the whole batch is all error data
                continue
            _LOGGER.debug("op:{} parse_end:{}".format(op_info_prefix,
                                                      time.time()))

            # log the cost of popping data from the input channel
            front_cost = int(round(_time() * 1000000)) - start
            for data_id, parsed_data in parsed_data_dict.items():
                _LOGGER.debug(
                    "(data_id={}) POP INPUT CHANNEL! op:{}, cost:{} ms".format(
                        data_id, self.name, front_cost / 1000.0))

            # preprocess
            start = profiler.record("prep#{}_0".format(op_info_prefix))
            preped_data_dict, err_channeldata_dict, skip_process_dict \
                    = self._run_preprocess(parsed_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("prep#{}_1".format(op_info_prefix))
            prep_time = end - start
            _LOGGER.debug("op:{} preprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), prep_time))
            try:
                # put error requests into output channel, skip process and postprocess stage
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(preped_data_dict) == 0:
                continue

            # process
            start = profiler.record("midp#{}_0".format(op_info_prefix))
            midped_data_dict, err_channeldata_dict \
                    = self._run_process(preped_data_dict, op_info_prefix, skip_process_dict, logid_dict)
            end = profiler.record("midp#{}_1".format(op_info_prefix))
            midp_time = end - start
            _LOGGER.debug("op:{} process_end:{}, cost:{}".format(
                op_info_prefix, time.time(), midp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(midped_data_dict) == 0:
                continue

            # postprocess
            start = profiler.record("postp#{}_0".format(op_info_prefix))
            postped_data_dict, err_channeldata_dict \
                    = self._run_postprocess(parsed_data_dict, midped_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("postp#{}_1".format(op_info_prefix))
            postp_time = end - start
            after_postp_time = _time()
            _LOGGER.debug("op:{} postprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), postp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(postped_data_dict) == 0:
                continue

            # push data to channel (if run succ)
            start = int(round(_time() * 1000000))
            try:
                profile_str = profiler.gen_profile_str()
                if self.is_jump_op() is True and self.check_jumping(
                        postped_data_dict) is True:
                    # push data to output channel of ops to be jumped 
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels_of_jump_ops,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL OF JUMP OPs! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
                else:
                    # push data to output channel.
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            out_time = end - start
            after_outchannel_time = int(round(_time() * 1000000))
            if trace_buffer is not None:
                trace_que.append({
                    "name": self.name,
                    "actions": {
                        "in": in_time,
                        "prep": prep_time,
                        "midp": midp_time,
                        "postp": postp_time,
                        "out": out_time,
                    }
                })
                while trace_que:
                    info = trace_que[0]
                    try:
                        trace_buffer.put_nowait(info)
                        trace_que.popleft()
                    except Queue.Full:
                        break

    def _initialize(self, is_thread_op, concurrency_idx):
        """
        Initialize one OP object in the target function of a thread or porcess.
        Initialize the client object with _client_config and _server_endpoints.
        Create a TimeProfiler per thread or process for recording profiler info.

        Args:
            is_thread_op: True, one op runs in one thread; False, one op runs
                in one process.
            concurrency_idx: process id, Thread mode does not use this param.

        Returns:
            TimeProfiler
        """
        @ErrorCatch
        def init_helper(self, is_thread_op, concurrency_idx):
            if is_thread_op:
                with self._for_init_op_lock:
                    if not self._succ_init_op:
                        # for the threaded version of Op, each thread cannot get its concurrency_idx
                        self.concurrency_idx = None
                        # init client
                        self.client = self.init_client(
                            self._client_config, self._server_endpoints)
                        # user defined
                        self.init_op()
                        self._succ_init_op = True
                        self._succ_close_op = False
            else:
                self.concurrency_idx = concurrency_idx
                # init client
                self.client = self.init_client(self._client_config,
                                               self._server_endpoints)
                # user defined
                self.init_op()

        init_helper(self, is_thread_op, concurrency_idx)
        # use a separate TimeProfiler per thread or process
        profiler = TimeProfiler()
        profiler.enable(True)
        return profiler

    def _finalize(self, is_thread_op):
        if is_thread_op:
            with self._for_close_op_lock:
                if not self._succ_close_op:
                    self._profiler = None
                    self.client = None
                    self._succ_init_op = False
                    self._succ_close_op = True

    def _log(self, info):
        return "{} {}".format(self.name, info)


class RequestOp(Op):
    """
    RequestOp is a special Op, for unpacking one request package. If the
    request needs one special unpackaging method, you need to inherit class
    RequestOp and rewrite function unpack_request_package.Notice!!! Class
    RequestOp does not run preprocess, process, postprocess.
    """

    def __init__(self):
        """
        Initialize the RequestOp
        """
        # PipelineService.name = "@DAGExecutor"
        super(RequestOp, self).__init__(name="@DAGExecutor", input_ops=[])
        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical("Op(Request) Failed to init: {}".format(e))
            os._exit(-1)

    def proto_tensor_2_numpy(self, tensor):
        """
        Convert proto tensor to numpy array, The supported types are as follows:
                INT64
                FP32
		INT32
		FP64
		INT16
		FP16
		BF16
		UINT8
		INT8
		BOOL
1568
                BYTES
1569
        Unsupported type:
1570
                STRING
1571 1572 1573 1574 1575 1576 1577
                COMPLEX64
                COMPLEX128

        Args:
            tensor: one tensor in request.tensors.

        Returns:
T
TeslaZhao 已提交
1578 1579
            np_data: np.ndnumpy, the tensor data is converted to numpy.
            lod_info: np.ndnumpy, lod info of the tensor data, None default.
1580 1581 1582 1583 1584 1585
        """
        if tensor is None or tensor.elem_type is None or tensor.name is None:
            _LOGGER.error("input params of tensor is wrong. tensor: {}".format(
                tensor))
            return None, None

        # Set dim shape
        dims = []
        if tensor.shape is None:
            dims.append(1)
        else:
            for one_dim in tensor.shape:
                dims.append(one_dim)

        # Set up 2-d lod tensor
        np_lod = None
        if len(tensor.lod) > 0:
            np_lod = np.array(tensor.lod).astype(int32).reshape(2, -1)

        np_data = None
        _LOGGER.info("proto_to_numpy, name:{}, type:{}, dims:{}".format(
            tensor.name, tensor.elem_type, dims))
        if tensor.elem_type == 0:
            # VarType: INT64
            np_data = np.array(tensor.int64_data).astype(int64).reshape(dims)
        elif tensor.elem_type == 1:
            # VarType: FP32
            np_data = np.array(tensor.float_data).astype(float32).reshape(dims)
        elif tensor.elem_type == 2:
            # VarType: INT32
            np_data = np.array(tensor.int_data).astype(int32).reshape(dims)
        elif tensor.elem_type == 3:
            # VarType: FP64
            np_data = np.array(tensor.float64_data).astype(float64).reshape(
                dims)
        elif tensor.elem_type == 4:
            # VarType: INT16
            np_data = np.array(tensor.int_data).astype(int16).reshape(dims)
        elif tensor.elem_type == 5:
            # VarType: FP16
            np_data = np.array(tensor.float_data).astype(float16).reshape(dims)
        elif tensor.elem_type == 6:
            # VarType: BF16
            np_data = np.array(tensor.uint32_data).astype(uint16).reshape(dims)
        elif tensor.elem_type == 7:
            # VarType: UINT8
            np_data = np.array(tensor.uint32_data).astype(uint8).reshape(dims)
        elif tensor.elem_type == 8:
            # VarType: INT8
            np_data = np.array(tensor.int_data).astype(int8).reshape(dims)
        elif tensor.elem_type == 9:
            # VarType: BOOL
            np_data = np.array(tensor.bool_data).astype(bool).reshape(dims)
        elif tensor.elem_type == 13:
            # VarType: BYTES
            byte_data = BytesIO(tensor.byte_data)
            np_data = np.load(byte_data, allow_pickle=True)
        else:
            _LOGGER.error("Sorry, the type {} of tensor {} is not supported.".
                          format(tensor.elem_type, tensor.name))
            raise ValueError(
                "Sorry, the type {} of tensor {} is not supported.".format(
                    tensor.elem_type, tensor.name))

        return np_data, np_lod

    def unpack_request_package(self, request):
        """
        Unpack request package by gateway.proto
        Args:
            request: HTTP body, JSON format

        Returns:
            dict_data: json fields in HTTP body
            log_id: log_id
            prod_errcode: None or ProductErrCode.SUCC.value default; otherwise,
                          product errors occurred. It is handled in the same
                          way as an exception.
            prod_errinfo: "" default
        """
        dict_data = {}
        log_id = None
        if request is None:
            _LOGGER.critical("request is None")
            raise ValueError("request is None")

        # unpack key/value string list
        for idx, key in enumerate(request.key):
            dict_data[key] = request.value[idx]
        log_id = request.logid

        # unpack proto.tensors data.
        for one_tensor in request.tensors:
            name = one_tensor.name
            elem_type = one_tensor.elem_type

            if one_tensor.name is None:
                _LOGGER.error("Tensor name is None.")
                raise ValueError("Tensor name is None.")

            numpy_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE.get(elem_type)
            if numpy_dtype is None:
                _LOGGER.error(
                    "elem_type:{} is mismatched in unpack_request_package.".
                    format(elem_type))
                raise ValueError("elem_type:{} error".format(elem_type))

            if numpy_dtype == "string":
                new_string = ""
                if one_tensor.str_data is None:
                    _LOGGER.error(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                    raise ValueError(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                for one_str in one_tensor.str_data:
                    new_string += one_str

                dict_data[name] = new_string
            else:
                np_data, np_lod = self.proto_tensor_2_numpy(one_tensor)
                dict_data[name] = np_data
                if np_lod is not None:
                    dict_data[name + ".lod"] = np_lod

        _LOGGER.info("RequestOp unpack one request. log_id:{}, clientip:{} \
            name:{}, method:{}, time:{}"
                     .format(log_id, request.clientip, request.name,
                             request.method, time.time()))

        return dict_data, log_id, None, ""


class ResponseOp(Op):
    """ 
    ResponseOp is a special Op, for packing one response package. If the channeldata 
    needs a special packaging method, you need to inherit class ReponseOp and rewrite
    pack_response_package function. Notice!!! Class ResponseOp does not run preprocess,
    process, postprocess.
    """

    def __init__(self, input_ops):
        """
        Initialize the ResponseOp
        """
        super(ResponseOp, self).__init__(
            name="@DAGExecutor", input_ops=input_ops)

        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical(
                "Op(ResponseOp) Failed to init: {}".format(e), exc_info=True)
            os._exit(-1)

        # init ResponseOp
        self.is_pack_tensor = False

    def set_pack_format(self, isTensor=False):
        self.is_pack_tensor = isTensor

    def pack_response_package(self, channeldata):
        """
        Get channeldata from the last channel and pack the response
        package serialized by protobuf.

        Args:
            channeldata: Type ChannelData

        Returns:
            resp: pipeline_service_pb2.Response()
        """
        resp = pipeline_service_pb2.Response()
        error_code = channeldata.error_code
        error_info = ""
        if error_code == ChannelDataErrcode.OK.value:
            # Framework level errors
            if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
                feed = channeldata.parse()
                # ndarray to string:
                # https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                np.set_printoptions(threshold=sys.maxsize)
                for name, var in feed.items():
                    resp.value.append(var.__repr__())
                    resp.key.append(name)
            elif channeldata.datatype == ChannelDataType.DICT.value:
                feed = channeldata.parse()
                for name, var in feed.items():
                    if not isinstance(var, str):
                        error_code = ChannelDataErrcode.TYPE_ERROR.value
                        error_info = self._log(
                            "fetch var type must be str({}).".format(
                                type(var)))
                        _LOGGER.error("(logid={}) Failed to pack RPC "
                                      "response package: {}".format(
                                          channeldata.id, error_info))
                        break
                    resp.value.append(var)
                    resp.key.append(name)
            else:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = self._log("error type({}) in datatype.".format(
                    channeldata.datatype))
                _LOGGER.error("(logid={}) Failed to pack RPC response"
                              " package: {}".format(channeldata.id, error_info))
        else:
            # Product level errors
            error_info = channeldata.error_info
            if error_code == ChannelDataErrcode.PRODUCT_ERROR.value:
                # rewrite error_code when product errors occurred
                error_code = channeldata.prod_error_code
                error_info = channeldata.prod_error_info

        # pack results
        if error_code is None:
            error_code = 0
        resp.err_no = error_code
        resp.err_msg = error_info

        return resp


class VirtualOp(Op):
    """ 
    To connect 2 ops across levels in dag view, we create virtual ops
    between non-virtual ops, and transfer data only. For examples, 
    the pred ops of F are D & E.In the process of building DAG, we will
    create channels layer by layer according to dag views.Op F is not 
    in the next layer view of [B, E], so we will create a virtual OP 
    'V1' whose pred OP is E. And so on, we create two virtual op 'V2'
    and 'V3', Finally, we find the non-virtual op F. we create 4 channels
    among E, V1, V2, V3 and F, the producer of V1, V2, V3 and F is E.
    
        DAG: [A -> B -> C -> D -> F]
               \-> E ----------/

        DAG view: [[A], [B, E], [C], [D], [F]]
        BUILD DAG: [A -> B -> C -> D -> E -> F]
                     \-> E -> V1-> V2-> V3/
    """

    def __init__(self, name, concurrency=1):
        super(VirtualOp, self).__init__(
            name=name, input_ops=None, concurrency=concurrency)
        self._virtual_pred_ops = []

    def add_virtual_pred_op(self, op):
        """
        Add the front op of the current virtual op.
        
        Args:
            op: one op object, may be a virtual op or not.

        Returns:
            None
        """
        self._virtual_pred_ops.append(op)

    def _actual_pred_op_names(self, op):
        """
        Recursively find the front op which is a non-virtual op.
   
        Args:
            op: one op object
            
        Returns:
            names: the name of non-virtual pred ops.
        """
        # can use disjoint-set, but it's not necessary
        if not isinstance(op, VirtualOp):
            return [op.name]
        names = []
        for x in op._virtual_pred_ops:
            names.extend(self._actual_pred_op_names(x))
        return names

    def add_output_channel(self, channel):
        """
        Add an output channel; the non-virtual pred ops are registered as
        the channel's producers.

        Args:
            channel: one channel.
          
        Returns:
            None.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output_channel"
                          " must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        for op in self._virtual_pred_ops:
        for op in self._virtual_pred_ops:
            for op_name in self._actual_pred_op_names(op):
                channel.add_producer(op_name)
        self._outputs.append(channel)

    def _run(self, concurrency_idx, input_channel, output_channels, client_type,
             is_thread_op):
        """
        The target function _run() only transfers data between OPs in one thread
        or process.

        Args:
            concurrency_idx: process id, not available in thread mode.
            input_channel: input channel
            output_channels: output channels
            client_type: no use
            is_thread_op: True, thread mode; False, process mode

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)

        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=1,
            timeout=None,
            op_info_prefix=op_info_prefix)

        while True:
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break

            try:
                for channeldata_dict in channeldata_dict_batch:
                    for name, data in channeldata_dict.items():
                        self._push_to_output_channels(
                            data, channels=output_channels, name=name)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break