#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
from time import time as _time
import time
import threading
import multiprocessing
from paddle_serving_client import Client
from concurrent import futures
import logging
import func_timeout
import os
import sys
import collections
import numpy as np
import json
from numpy import *
from io import BytesIO

if sys.version_info.major == 2:
    import Queue
elif sys.version_info.major == 3:
    import queue as Queue
else:
    raise Exception("Unsupported Python version")

from .proto import pipeline_service_pb2
from .channel import (ThreadChannel, ProcessChannel, ChannelDataErrcode,
                      ChannelData, ChannelDataType, ChannelStopError,
                      ChannelTimeoutError, ProductErrCode)
from .util import NameGenerator
from .profiler import UnsafeTimeProfiler as TimeProfiler
from . import local_service_handler
from .pipeline_client import PipelineClient as PPClient

_LOGGER = logging.getLogger(__name__)
_op_name_gen = NameGenerator("Op")

# data type of tensor to numpy_data
_TENSOR_DTYPE_2_NUMPY_DATA_DTYPE = {
    0: "int64",  # VarType.INT64
    1: "float32",  # VarType.FP32
    2: "int32",  # VarType.INT32
    3: "float64",  # VarType.FP64
    4: "int16",  # VarType.INT16
    5: "float16",  # VarType.FP16
    6: "uint16",  # VarType.BF16
    7: "uint8",  # VarType.UINT8
    8: "int8",  # VarType.INT8
    9: "bool",  # VarType.BOOL
    10: "complex64",  # VarType.COMPLEX64
    11: "complex128",  # VarType.COMPLEX128
    12: "string",  # load by numpy
    13: "bytes",  # load by numpy
}
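
# For example, a proto tensor whose dtype field is 1 is loaded as numpy
# "float32" (variable name below is illustrative only):
#     np_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE[1]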


class Op(object):
    def __init__(self,
                 name=None,
                 input_ops=[],
                 server_endpoints=None,
                 fetch_list=None,
                 client_config=None,
                 client_type=None,
                 concurrency=None,
                 timeout=None,
                 retry=0,
                 batch_size=None,
                 auto_batching_timeout=None,
                 local_service_handler=None,
                 jump_to_ops=[]):
        # In __init__, all the parameters are just saved and Op is not initialized
        if name is None:
            name = _op_name_gen.next()
        self.name = name  # to identify the type of OP, it must be globally unique
        self.concurrency = concurrency  # amount of concurrency
        self.set_input_ops(input_ops)
        self.set_jump_to_ops(jump_to_ops)

        self._local_service_handler = local_service_handler
        self._server_endpoints = server_endpoints
        self._fetch_names = fetch_list
        self._client_config = client_config
        self.client_type = client_type
        self._timeout = timeout
        self._retry = max(1, retry)
        self._batch_size = batch_size
        self._auto_batching_timeout = auto_batching_timeout

        self._input = None
        self._outputs = []

        self._server_use_profile = False
        self._tracer = None

        # for grpc_pipeline predict mode. False, string key/val; True, tensor format.
        self._pack_tensor_format = False

        # only for thread op
        self._for_init_op_lock = threading.Lock()
        self._for_close_op_lock = threading.Lock()
        self._succ_init_op = False
        self._succ_close_op = False
    def init_from_dict(self, conf):
        """
        Initializing one Op from config.yaml. If server_endpoints exists,
        it is the remote RPC mode; otherwise it is the local RPC mode. There
        are three types of predictors in local RPC mode: brpc, grpc and
        local_predictor.

        Args:
            conf: config.yaml

        Returns:
            None
        """
        if self.concurrency is None:
            self.concurrency = conf["concurrency"]
        if self._retry is None:
            self._retry = conf["retry"]
        if self._fetch_names is None:
            self._fetch_names = conf.get("fetch_list")
        if self._client_config is None:
            self._client_config = conf.get("client_config")

        if self._timeout is None:
            self._timeout = conf["timeout"]
        if self._timeout > 0:
            self._timeout = self._timeout / 1000.0
        else:
            self._timeout = -1

        if self._batch_size is None:
            self._batch_size = conf["batch_size"]
        if self._auto_batching_timeout is None:
            self._auto_batching_timeout = conf["auto_batching_timeout"]
        if self._auto_batching_timeout <= 0 or self._batch_size == 1:
            _LOGGER.debug(
                self._log(
                    "Because auto_batching_timeout <= 0 or batch_size == 1,"
                    " set auto_batching_timeout to None."))
            self._auto_batching_timeout = None
        else:
            self._auto_batching_timeout = self._auto_batching_timeout / 1000.0
        self.model_config = None
        self.workdir = None
        self.thread_num = self.concurrency
        self.device_type = -1
        self.devices = ""
        self.mem_optim = False
        self.ir_optim = False
        self.precision = "fp32"
        self.use_mkldnn = False
        self.mkldnn_cache_capacity = 0
        self.mkldnn_op_list = None
        self.mkldnn_bf16_op_list = None

        if self._server_endpoints is None:
            server_endpoints = conf.get("server_endpoints", [])
            if len(server_endpoints) != 0:
                # remote service
                self.with_serving = True
                self._server_endpoints = server_endpoints
                self.client_type = conf["client_type"]
            else:
                if self._local_service_handler is None:
                    local_service_conf = conf.get("local_service_conf")
                    _LOGGER.info("local_service_conf: {}".format(
                        local_service_conf))
                    self.model_config = local_service_conf.get("model_config")
                    self.client_type = local_service_conf.get("client_type")
                    self.workdir = local_service_conf.get("workdir")
                    self.thread_num = local_service_conf.get("thread_num")
                    self.device_type = local_service_conf.get("device_type")
                    self.devices = local_service_conf.get("devices")
                    self.mem_optim = local_service_conf.get("mem_optim")
                    self.ir_optim = local_service_conf.get("ir_optim")
                    self._fetch_names = local_service_conf.get("fetch_list")
                    self.precision = local_service_conf.get("precision")
                    self.use_mkldnn = local_service_conf.get("use_mkldnn")
                    self.mkldnn_cache_capacity = local_service_conf.get(
                        "mkldnn_cache_capacity")
                    self.mkldnn_op_list = local_service_conf.get(
                        "mkldnn_op_list")
                    self.mkldnn_bf16_op_list = local_service_conf.get(
                        "mkldnn_bf16_op_list")

                    if self.model_config is None:
                        self.with_serving = False
                    else:
                        # local rpc service
                        self.with_serving = True
                        if self.client_type == "brpc" or self.client_type == "grpc":
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.
                                mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list)
                            service_handler.prepare_server()  # get fetch_list
                            service_ports = service_handler.get_port_list()
                            self._server_endpoints = [
                                "127.0.0.1:{}".format(p) for p in service_ports
                            ]
                            if self._client_config is None:
                                self._client_config = service_handler.get_client_config(
                                )
                            if self._fetch_names is None:
                                self._fetch_names = service_handler.get_fetch_list(
                                )
                        elif self.client_type == "local_predictor":
                            service_handler = local_service_handler.LocalServiceHandler(
                                model_config=self.model_config,
                                client_type=self.client_type,
                                workdir=self.workdir,
                                thread_num=self.thread_num,
                                device_type=self.device_type,
                                devices=self.devices,
                                fetch_names=self._fetch_names,
                                mem_optim=self.mem_optim,
                                ir_optim=self.ir_optim,
                                precision=self.precision,
                                use_mkldnn=self.use_mkldnn,
                                mkldnn_cache_capacity=self.
                                mkldnn_cache_capacity,
                                mkldnn_op_list=self.mkldnn_op_list,
                                mkldnn_bf16_op_list=self.mkldnn_bf16_op_list)
                            if self._client_config is None:
                                self._client_config = service_handler.get_client_config(
                                )
                        self._local_service_handler = service_handler
                else:
                    self.with_serving = True
                    self._local_service_handler.prepare_server(
                    )  # get fetch_list
                    service_ports = self._local_service_handler.get_port_list()
                    self._server_endpoints = [
                        "127.0.0.1:{}".format(p) for p in service_ports
                    ]
                    if self._client_config is None:
                        self._client_config = self._local_service_handler.get_client_config(
                        )
                    if self._fetch_names is None:
                        self._fetch_names = self._local_service_handler.get_fetch_list(
                        )
        else:
            self.with_serving = True

        if not isinstance(self, RequestOp) and not isinstance(self, ResponseOp):
            _LOGGER.info(
                self._log("\n\tinput_ops: {},"
                          "\n\tserver_endpoints: {},"
                          "\n\tfetch_list: {},"
                          "\n\tclient_config: {},"
                          "\n\tconcurrency: {},"
                          "\n\ttimeout(s): {},"
                          "\n\tretry: {},"
                          "\n\tbatch_size: {},"
                          "\n\tauto_batching_timeout(s): {}".format(
                              ", ".join([op.name for op in self._input_ops
                                         ]), self._server_endpoints,
                              self._fetch_names, self._client_config,
                              self.concurrency, self._timeout, self._retry,
                              self._batch_size, self._auto_batching_timeout)))

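    # A sketch of the per-Op section of config.yaml that init_from_dict()
    # consumes (keys mirror the conf lookups above; the model path and fetch
    # name are hypothetical):
    #
    #     op:
    #         det:
    #             concurrency: 2
    #             local_service_conf:
    #                 client_type: local_predictor
    #                 model_config: ocr_det_model
    #                 device_type: 0
    #                 devices: ""
    #                 fetch_list: ["concat_1.tmp_0"]
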
    def launch_local_rpc_service(self):
        """
        Launching multiple local rpc servers.

        Args:
            None

        Returns:
            None
        """
        if self._local_service_handler is None:
            _LOGGER.warning(
                self._log("Failed to launch local rpc"
                          " service: local_service_handler is None."))
            return
        port = self._local_service_handler.get_port_list()
        #if self._local_service_handler.client_type == "local_predictor":
        #    _LOGGER.info("Op({}) use local predictor.")
        #    return
        self._local_service_handler.start_server()
        _LOGGER.info("Op({}) use local rpc service at port: {}"
                     .format(self.name, port))

    def use_default_auto_batching_config(self):
        """
        Set the auto batching config default.

        Args:
            None

        Returns:
            None
        """
        if self._batch_size != 1:
            _LOGGER.warning("Op({}) reset batch_size=1 (original: {})"
                            .format(self.name, self._batch_size))
            self._batch_size = 1
        if self._auto_batching_timeout is not None:
            _LOGGER.warning(
                "Op({}) reset auto_batching_timeout=None (original: {})"
                .format(self.name, self._auto_batching_timeout))
            self._auto_batching_timeout = None

    def use_profiler(self, use_profile):
        self._server_use_profile = use_profile

    def set_tracer(self, tracer):
        self._tracer = tracer

    def init_client(self, client_config, server_endpoints):
        """
        Initialize the client object. There are three types of clients: brpc,
        grpc and local_predictor. In grpc or brpc mode, the client connects to
        the endpoints.

        Args:
            client_config: client config info
            server_endpoints: server IP/Port list.

        Returns:
            client: client object.
        """
        if self.with_serving == False:
            _LOGGER.info("Op({}) has no client (and it also does not "
                         "run the process function)".format(self.name))
            return None
        if self.client_type == 'brpc':
            client = Client()
            client.load_client_config(client_config)
        elif self.client_type == 'pipeline_grpc':
            client = PPClient()
        elif self.client_type == 'local_predictor':
            if self.local_predictor is None:
                raise ValueError("local predictor not yet created")
            client = self.local_predictor
        else:
            raise ValueError("Failed to init client: unknown client "
                             "type {}".format(self.client_type))
        if self._fetch_names is None:
            self._fetch_names = client.fetch_names_
            _LOGGER.info("Op({}) has no fetch name set. So fetch all vars"
                         .format(self.name))
        if self.client_type != "local_predictor":
            client.connect(server_endpoints)
        return client

    def get_input_ops(self):
        return self._input_ops

    def set_input_ops(self, ops):
        """
        Set input ops. Each op may have many input ops, but only one input
        channel.

        Args:
            ops: op list

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]
        self._input_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set input_ops: input op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._input_ops.append(op)

    def set_pack_tensor_format(self, is_tensor_format=False):
        self._pack_tensor_format = is_tensor_format

    def get_jump_to_ops(self):
        return self._jump_to_ops

    def set_jump_to_ops(self, ops):
        """
        Set jump-to ops; this op can then send channeldata to their output
        channels.

        Args:
            ops: op list to be jumped to

        Returns:
            None.
        """
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]

        self._jump_to_ops = []
        for op in ops:
            if not isinstance(op, Op):
                _LOGGER.critical(
                    self._log("Failed to set jump_to_ops: jump-to op "
                              "must be Op type, not {}".format(type(op))))
                os._exit(-1)
            self._jump_to_ops.append(op)

    def is_jump_op(self):
        """
        Whether the op has _jump_to_ops members or not.

        Args:
            None

        Returns:
            True or False
        """
        return len(self._jump_to_ops) > 0

    def check_jumping(self, input_data):
        """
        Check whether to send data to jump ops. WhileOp needs to override
        this interface; this function returns False by default.

        Args:
            input_data: input data to be preprocessed

        Returns:
            True, send data to the output channel of jump ops
            False, send data to output channel.
        """
        return False

    def get_output_channels_of_jump_ops(self):
        """
        Get output channels of jump ops.

        Args:
            None

        Returns:
            list of channels
        """
        channels = []
        if self.is_jump_op() is False:
            return channels
        for op in self._jump_to_ops:
            _LOGGER.info("op:{} extend op._get_output_channels:{}".format(
                op.name, op._get_output_channels()))
            channels.extend(op._get_output_channels())

        _LOGGER.info("get_output_channels_of_jump_ops, channels:{}".format(
            channels))
        return channels

    def add_input_channel(self, channel):
        """
        Add one input channel to the Op. Each op may have many front ops,
        but only one input channel.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to set input_channel: input "
                          "channel must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        channel.add_consumer(self.name)
        self._input = channel

    def clean_input_channel(self):
        self._input = None

    def _get_input_channel(self):
        return self._input

    def add_output_channel(self, channel):
        """
        Add one output channel to the Op. Each op may have many output
        channels, but only one front channel.

        Args:
            channel: an output channel object.

        Returns:
            None
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output channel "
                          "must be Channel type, not {}".format(type(channel))))
            os._exit(-1)
        channel.add_producer(self.name)
        self._outputs.append(channel)
        _LOGGER.debug("op:{} add output_channel {}".format(self.name, channel))

    def clean_output_channels(self):
        self._outputs = []

    def _get_output_channels(self):
        return self._outputs

    def preprocess(self, input_dicts, data_id=0, log_id=0):
        """
        In preprocess stage, assembling data for process stage. Users can
        override this function for model feed features.

        Args:
            input_dicts: input data to be preprocessed
            data_id: inner unique id, increased automatically
            log_id: global unique id for RTT, 0 default

        Return:
            output_data: data for process stage
            is_skip_process: skip process stage or not, False default
            prod_errcode: None default; otherwise, product errors occurred.
                          It is handled in the same way as an exception.
            prod_errinfo: "" default
        """
        # multiple previous Op
        if len(input_dicts) != 1:
            _LOGGER.critical(
                self._log(
                    "Failed to run preprocess: this Op has multiple previous "
                    "inputs. Please override this func."))
            os._exit(-1)

        (_, input_dict), = input_dicts.items()
        return input_dict, False, None, ""

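    # A minimal override sketch (feed/var names are hypothetical): convert
    # the single upstream dict into the feed dict the model expects.
    #
    #     def preprocess(self, input_dicts, data_id=0, log_id=0):
    #         (_, input_dict), = input_dicts.items()
    #         img = np.frombuffer(input_dict["image"], dtype="uint8")
    #         feed = {"x": img.reshape(1, 3, 224, 224).astype("float32")}
    #         return feed, False, None, ""
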
    def process(self, feed_batch, typical_logid=0):
        """
        In process stage, send requests to the inference server or predict
        locally. Users do not need to override this function.

        Args:
            feed_batch: data to be fed to the inference server
            typical_logid: mark of batch predicts, usually the first logid
                in the batch, 0 default.

        Returns:
            call_result: predict result
        """

        call_result = None
        err_code = ChannelDataErrcode.OK.value
        err_info = ""

        if self.client_type == "local_predictor":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for local_predictor mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"

            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "brpc":
            err, err_info = ChannelData.check_batch_npdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                        npdata in process for brpc mode.".format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be npdata"
            call_result = self.client.predict(
                feed=feed_batch[0],
                fetch=self._fetch_names,
                batch=True,
                log_id=typical_logid)

        elif self.client_type == "pipeline_grpc":
            err, err_info = ChannelData.check_dictdata(feed_batch)
            if err != 0:
                _LOGGER.error(
                    self._log("Failed to run process: {}. feed_batch must be \
                       dict data in process for pipeline_grpc mode."
                              .format(err_info)))
                return call_result, ChannelDataErrcode.TYPE_ERROR.value, "feed_batch must be dict"

            call_result = self.client.predict(
                feed_dict=feed_batch[0],
                fetch=self._fetch_names,
                asyn=False,
                pack_tensor_format=self._pack_tensor_format,
                profile=False)
            if call_result is None:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. call_result is None."))
                return call_result, ChannelDataErrcode.UNKNOW.value, "pipeline_grpc error"
            if call_result.err_no != 0:
                _LOGGER.error(
                    self._log("Failed in pipeline_grpc. err_no:{}, err_info:{}".
                              format(call_result.err_no, call_result.err_msg)))
                return call_result, ChannelDataErrcode(
                    call_result.err_no).value, call_result.err_msg

            new_dict = {}
            err_code = ChannelDataErrcode(call_result.err_no).value
            err_info = call_result.err_msg
            for idx, key in enumerate(call_result.key):
                new_dict[key] = [call_result.value[idx]]
            call_result = new_dict

        return call_result, err_code, err_info

    def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
        """
        In postprocess stage, assemble data for the next op or output.

        Args:
            input_data: data returned in preprocess stage, dict (for single
                predict) or list (for batch predict)
            fetch_data: data returned in process stage, dict (for single
                predict) or list (for batch predict)
            data_id: inner unique id, increased automatically
            log_id: logid, 0 default

        Returns:
            fetch_dict: fetch result, must be dict type.
            prod_errcode: None default; otherwise, product errors occurred.
                          It is handled in the same way as an exception.
            prod_errinfo: "" default
        """
        fetch_dict = {}
        if isinstance(fetch_data, dict):
            fetch_dict = fetch_data
        return fetch_dict, None, ""
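
    # A minimal override sketch (fetch/output names are hypothetical): turn
    # the fetch map returned by process() into the fields downstream ops
    # expect.
    #
    #     def postprocess(self, input_data, fetch_data, data_id=0, log_id=0):
    #         score = fetch_data["softmax_0.tmp_0"]
    #         return {"label": str(np.argmax(score))}, None, ""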

    def _parse_channeldata(self, channeldata_dict):
        """
        Parse one channeldata.

        Args:
            channeldata_dict : channel data to be parsed, dict type

        Return:
            data_id: created by dag._id_generator, unique
            error_channeldata: error channeldata
            parsed_data: get np/dict data from channeldata
            client_need_profile: need profile info
            profile_set: profile info
            log_id: logid for tracing a request
        """
        data_id, error_channeldata = None, None
        client_need_profile, profile_set = False, set()
        parsed_data = {}

        key = list(channeldata_dict.keys())[0]
        data_id = channeldata_dict[key].id
        log_id = channeldata_dict[key].log_id
        client_need_profile = channeldata_dict[key].client_need_profile

        for name, data in channeldata_dict.items():
            if data.error_code != ChannelDataErrcode.OK.value:
                error_channeldata = data
                break
            parsed_data[name] = data.parse()
            if client_need_profile:
                profile_set |= data.profile_data_set
        return (data_id, error_channeldata, parsed_data, client_need_profile,
                profile_set, log_id)

    def _push_to_output_channels(self,
                                 data,
                                 channels,
                                 name=None,
                                 profile_str=None,
                                 client_need_profile=False,
                                 profile_set=None):
        """
        Push data to output channels without running the later stages
        (preprocess, process, postprocess).

        Args:
            data: channeldata, to be pushed
            channels: output channels
            name: op name
            profile_str: one profile message
            client_need_profile: False default
            profile_set: profile message collections

        Returns:
            None
        """
        if name is None:
            name = self.name

        # add profile into channeldata
        if client_need_profile and profile_set is not None:
            if profile_str is not None:
                profile_set.add(profile_str)
            data.add_profile(profile_set)

        for channel in channels:
            channel.push(data, name)

    def start_with_process(self):
        """
        Each OP creates a process to run the main loop and initializes the
        CUDA environment in each individual process.

        Args:
            None

        Returns:
            process array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()
        process = []
        for concurrency_idx in range(self.concurrency):
            p = multiprocessing.Process(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), False, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn,
                      self.mkldnn_cache_capacity, self.mkldnn_op_list,
                      self.mkldnn_bf16_op_list, self.is_jump_op(),
                      self.get_output_channels_of_jump_ops()))
            p.daemon = True
            p.start()
            process.append(p)
        return process

    def start_with_thread(self):
        """
        Each OP creates a thread to run the main loop and initializes the
        CUDA environment in the main thread.

        Args:
            None

        Returns:
            thread array
        """
        trace_buffer = None
        if self._tracer is not None:
            trace_buffer = self._tracer.data_buffer()

        # Init cuda env in main thread
        if self.client_type == "local_predictor":
            _LOGGER.info("Init cuda env in main thread")
            self.local_predictor = self._local_service_handler.get_client(0)

        threads = []
        for concurrency_idx in range(self.concurrency):
            t = threading.Thread(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), True, trace_buffer,
                      self.model_config, self.workdir, self.thread_num,
                      self.device_type, self.devices, self.mem_optim,
                      self.ir_optim, self.precision, self.use_mkldnn,
                      self.mkldnn_cache_capacity, self.mkldnn_op_list,
                      self.mkldnn_bf16_op_list, self.is_jump_op(),
                      self.get_output_channels_of_jump_ops()))
            # Daemon threads are not joined on exit; the process can
            # terminate while they are still running.
            t.daemon = True
            t.start()
            threads.append(t)
        return threads

    def init_op(self):
        pass

    def _run_preprocess(self, parsed_data_dict, op_info_prefix, logid_dict):
        """
        Run preprocess stage.

        Args:
            parsed_data_dict: data to be pre-processed
            op_info_prefix: input op info
            logid_dict: logid dict

        Returns:
            preped_data_dict: data preprocessed, to be processed
            err_channeldata_dict: when exceptions occurred, putting errors in it.
            skip_process_dict: skip process stage or not
        """
        _LOGGER.debug("{} Running preprocess".format(op_info_prefix))
        preped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        skip_process_dict = {}
        for data_id, parsed_data in parsed_data_dict.items():
            preped_data, error_channeldata = None, None
            is_skip_process = False
            prod_errcode, prod_errinfo = None, None
            log_id = logid_dict.get(data_id)
            try:
                preped_data, is_skip_process, prod_errcode, prod_errinfo = self.preprocess(
                    parsed_data, data_id, logid_dict.get(data_id))
                # Set skip_process_dict
                if is_skip_process is True:
                    skip_process_dict[data_id] = True
            except TypeError as e:
                # Error type in channeldata.datatype
                error_info = "(data_id={} log_id={}) {} Failed to preprocess: {}".format(
                    data_id, log_id, op_info_prefix, e)
                _LOGGER.error(error_info, exc_info=True)
                error_channeldata = ChannelData(
                    error_code=ChannelDataErrcode.TYPE_ERROR.value,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=log_id)
            except Exception as e:
                error_info = "(data_id={} log_id={}) {} Failed to preprocess: {}".format(
                    data_id, log_id, op_info_prefix, e)
                _LOGGER.error(error_info, exc_info=True)
                error_channeldata = ChannelData(
                    error_code=ChannelDataErrcode.UNKNOW.value,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=log_id)

            if prod_errcode is not None:
                # product errors occurred
                error_channeldata = ChannelData(
                    error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                    error_info="",
                    prod_error_code=prod_errcode,
                    prod_error_info=prod_errinfo,
                    data_id=data_id,
                    log_id=log_id)

            if error_channeldata is not None:
                err_channeldata_dict[data_id] = error_channeldata
            else:
                preped_data_dict[data_id] = preped_data
        _LOGGER.debug("{} Succ preprocess".format(op_info_prefix))
        return preped_data_dict, err_channeldata_dict, skip_process_dict

    def _run_process(self, preped_data_dict, op_info_prefix, skip_process_dict,
                     logid_dict):
        """
        Run process stage.

        Args:
            preped_data_dict: feed the data to be predicted by the model.
            op_info_prefix: prefix op info
            skip_process_dict: skip process stage or not
            logid_dict: logid dict

        Returns:
            midped_data_dict: data midprocessed, to be post-processed
            err_channeldata_dict: when exceptions occurred, putting errors in it
        """
        _LOGGER.debug("{} Running process".format(op_info_prefix))
        midped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        is_skip_process = False
        data_ids = list(preped_data_dict.keys())

        # skip process stage
        if len(data_ids) == 1 and skip_process_dict.get(data_ids[0]) == True:
            is_skip_process = True
        if self.with_serving is False or is_skip_process is True:
            midped_data_dict = preped_data_dict
            _LOGGER.warning("(data_id={} log_id={}) OP={} skip process stage. " \
                "with_serving={}, is_skip_process={}".format(data_ids[0],
                logid_dict.get(data_ids[0]), self.name, self.with_serving,
                is_skip_process))
            return midped_data_dict, err_channeldata_dict

        # use typical_logid to mark batch data
        # data_ids is one self-increasing unique key.
        typical_logid = data_ids[0]
        if len(data_ids) != 1:
            for data_id in data_ids:
                _LOGGER.info(
                    "(data_id={} logid={}) Auto-batching is on for Op={}. " \
                    "We selected logid={} (from batch: {}) as a " \
                    "representative for logging.".format(
                    data_id, logid_dict.get(data_id), self.name,
                    typical_logid, data_ids))

        one_input = preped_data_dict[data_ids[0]]
        feed_batch = []
        feed_dict = {}
        cur_offset = 0
        input_offset_dict = {}
        batch_input = False

        if isinstance(one_input, dict):
            # For dict type, data structure is dict.
            # Merge multiple dicts for data_ids into one dict.
            # feed_batch is the input param of the predict func.
            # input_offset_dict is used for data restoration [data_ids]
            if len(data_ids) == 1:
                feed_batch = [preped_data_dict[data_id] for data_id in data_ids]
            else:
                for data_id in data_ids:
                    for key, val in preped_data_dict[data_id].items():
                        has_val = feed_dict.get(key)
                        if has_val is None:
                            feed_dict[key] = val
                            continue
                        # merge two np.ndarrays
                        if isinstance(val, np.ndarray):
                            feed_dict[key] = np.append(
                                feed_dict[key], val, axis=0)
                feed_batch.append(feed_dict)

            for data_id in data_ids:
                start = cur_offset
                for key, val in preped_data_dict[data_id].items():
                    if isinstance(val, (list, np.ndarray)):
                        cur_offset += len(val)
                    else:
                        cur_offset += 1
                    break
                input_offset_dict[data_id] = [start, cur_offset]
        elif isinstance(one_input, list):
            # For list type, data structure of one_input is [dict, dict, ...]
            # Data structure of feed_batch is [dict1_1, dict1_2, dict2_1, ...]
            # Data structure of input_offset_dict is { data_id : [start, end] }
            batch_input = True
            for data_id in data_ids:
                feed_batch.extend(preped_data_dict[data_id])
                data_size = len(preped_data_dict[data_id])
                start = cur_offset
                cur_offset = start + data_size
                input_offset_dict[data_id] = [start, cur_offset]
        else:
            _LOGGER.critical(
                "(data_id={} log_id={}){} Failed to process: expect input type is dict"
                " or list(batch input), but get {}".format(data_ids[
                    0], typical_logid, op_info_prefix, type(one_input)))
            for data_id in data_ids:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = "expect input type is dict or list, but get {}".format(
                    type(one_input))
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=error_code,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        midped_batch = None
        error_code = ChannelDataErrcode.OK.value
        error_info = ""
        if self._timeout <= 0:
            # No retry
            try:
                if batch_input is False:
                    midped_batch, error_code, error_info = self.process(
                        feed_batch, typical_logid)
                else:
                    midped_batch = []
                    for idx in range(len(feed_batch)):
                        predict_res, error_code, error_info = self.process(
                            [feed_batch[idx]], typical_logid)
                        if error_code != ChannelDataErrcode.OK.value:
                            break
                        midped_batch.append(predict_res)
            except Exception as e:
                error_code = ChannelDataErrcode.UNKNOW.value
                error_info = "(data_id={} log_id={}) {} Failed to process(batch: {}): {}".format(
                    data_ids[0], typical_logid, op_info_prefix, data_ids, e)
                _LOGGER.error(error_info, exc_info=True)
        else:
            # retry N times configured in yaml files.
            for i in range(self._retry):
                try:
                    # time out for each process
                    if batch_input is False:
                        midped_batch, error_code, error_info = func_timeout.func_timeout(
                            self._timeout,
                            self.process,
                            args=(feed_batch, typical_logid))
                    else:
                        midped_batch = []
                        for idx in range(len(feed_batch)):
                            predict_res, error_code, error_info = func_timeout.func_timeout(
                                self._timeout,
                                self.process,
                                args=([feed_batch[idx]], typical_logid))
                            midped_batch.append(predict_res)

                except func_timeout.FunctionTimedOut as e:
                    if i + 1 >= self._retry:
                        error_code = ChannelDataErrcode.TIMEOUT.value
                        error_info = "(log_id={}) {} Failed to process(batch: {}): " \
                            "exceeded retry count.".format(typical_logid, op_info_prefix, data_ids)
                        _LOGGER.error(error_info)
                    else:
                        _LOGGER.warning(
                            "(log_id={}) {} Failed to process(batch: {}): timeout,"
                            " and retrying({}/{})...".format(
                                typical_logid, op_info_prefix, data_ids, i + 1,
                                self._retry))
                except Exception as e:
                    error_code = ChannelDataErrcode.UNKNOW.value
                    error_info = "(log_id={}) {} Failed to process(batch: {}): {}".format(
                        typical_logid, op_info_prefix, data_ids, e)
                    _LOGGER.error(error_info, exc_info=True)
                    break
                else:
                    break

        # 2 kinds of errors
        if error_code != ChannelDataErrcode.OK.value or midped_batch is None:
            error_info = "(log_id={}) {} failed to predict.".format(
                typical_logid, self.name)
            _LOGGER.error(error_info)
            for data_id in data_ids:
                err_channeldata_dict[data_id] = ChannelData(
                    error_code=ChannelDataErrcode.CLIENT_ERROR.value,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=logid_dict.get(data_id))
            return midped_data_dict, err_channeldata_dict

        # Split batch infer result to each data_ids
        if batch_input is False:
            var_names = midped_batch.keys()
            lod_var_names = set()
            lod_offset_names = set()
            # midped_batch is dict type for single input 
            for name in var_names:
                lod_offset_name = "{}.lod".format(name)
                if lod_offset_name in var_names:
                    _LOGGER.debug("(log_id={}) {} {} is LodTensor".format(
                        typical_logid, op_info_prefix, name))
                    lod_var_names.add(name)
                    lod_offset_names.add(lod_offset_name)

            for idx, data_id in enumerate(data_ids):
                midped_data_dict[data_id] = {}

            for name, value in midped_batch.items():
                if name in lod_offset_names:
                    continue
                if name in lod_var_names:
                    # lodtensor
                    lod_offset_name = "{}.lod".format(name)
                    lod_offset = midped_batch[lod_offset_name]
                    for idx, data_id in enumerate(data_ids):
                        data_offset_left = input_offset_dict[data_id][0]
                        data_offset_right = input_offset_dict[data_id][1]
                        lod_offset_left = lod_offset[data_offset_left]
                        lod_offset_right = lod_offset[data_offset_right]
                        midped_data_dict[data_id][name] = value[
                            lod_offset_left:lod_offset_right]
                        midped_data_dict[data_id][lod_offset_name] = \
                            lod_offset[data_offset_left:data_offset_right + 1] - lod_offset[data_offset_left]
                else:
                    # normal tensor
                    for idx, data_id in enumerate(data_ids):
                        start = input_offset_dict[data_id][0]
                        end = input_offset_dict[data_id][1]
                        midped_data_dict[data_id][name] = value[start:end]
        else:
            # midped_batch is list type for batch input
            for idx, data_id in enumerate(data_ids):
                start = input_offset_dict[data_id][0]
                end = input_offset_dict[data_id][1]
                midped_data_dict[data_id] = midped_batch[start:end]
        return midped_data_dict, err_channeldata_dict

    def _run_postprocess(self, parsed_data_dict, midped_data_dict,
                         op_info_prefix, logid_dict):
        """
        Run postprocess stage.

        Args:
            parsed_data_dict: data returned in preprocess stage
            midped_data_dict: data returned in process stage
            op_info_prefix: prefix op info
            logid_dict: logid dict

        Returns:
            postped_data_dict: data postprocessed
            err_channeldata_dict: when exceptions occurred, putting errors in it
        """
        _LOGGER.debug("{} Running postprocess".format(op_info_prefix))
        postped_data_dict = collections.OrderedDict()
        err_channeldata_dict = collections.OrderedDict()
        for data_id, midped_data in midped_data_dict.items():
            log_id = logid_dict.get(data_id)
            postped_data, err_channeldata = None, None
            prod_errcode, prod_errinfo = None, None
            try:
                postped_data, prod_errcode, prod_errinfo = self.postprocess(
                    parsed_data_dict[data_id], midped_data, data_id,
                    logid_dict.get(data_id))
            except Exception as e:
                error_info = "(data_id={} log_id={}) {} Failed to postprocess: {}".format(
                    data_id, log_id, op_info_prefix, e)
                _LOGGER.error(error_info, exc_info=True)
                err_channeldata = ChannelData(
                    error_code=ChannelDataErrcode.UNKNOW.value,
                    error_info=error_info,
                    data_id=data_id,
                    log_id=log_id)

            if prod_errcode is not None:
                # product errors occurred
                err_channeldata = ChannelData(
                    error_code=ChannelDataErrcode.PRODUCT_ERROR.value,
                    error_info="",
                    prod_error_code=prod_errcode,
                    prod_error_info=prod_errinfo,
                    data_id=data_id,
                    log_id=log_id)

            if err_channeldata is not None:
                err_channeldata_dict[data_id] = err_channeldata
                continue
            else:
                if not isinstance(postped_data, dict):
                    error_info = "(data_id={} log_id={}) {} Failed to postprocess: " \
                            "output of postprocess function must be " \
                            "dict type, but get {}".format(
                                data_id, log_id, op_info_prefix,
                                type(postped_data))
                    _LOGGER.error(error_info)
                    err_channeldata = ChannelData(
                        error_code=ChannelDataErrcode.UNKNOW.value,
                        error_info=error_info,
                        data_id=data_id,
                        log_id=log_id)
                    err_channeldata_dict[data_id] = err_channeldata
                    continue

                output_data = None
                err, _ = ChannelData.check_npdata(postped_data)
                if err == 0:
                    output_data = ChannelData(
                        ChannelDataType.CHANNEL_NPDATA.value,
                        npdata=postped_data,
                        data_id=data_id,
                        log_id=log_id)
                else:
                    output_data = ChannelData(
                        ChannelDataType.DICT.value,
                        dictdata=postped_data,
                        data_id=data_id,
                        log_id=log_id)
                postped_data_dict[data_id] = output_data
        _LOGGER.debug("{} Succ postprocess".format(op_info_prefix))
        return postped_data_dict, err_channeldata_dict
B
barriery 已提交
1162 1163
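
    # A minimal sketch of the contract enforced above (illustrative only; the
    # names `input_dict`/`fetch_dict` and the "score"/"label" fields are
    # hypothetical): a user-defined postprocess must return a dict plus a
    # product error code/info pair, where an errcode of None means success.
    #
    #     def postprocess(self, input_dict, fetch_dict, data_id, log_id):
    #         res_dict = {"label": str(fetch_dict["score"].argmax())}
    #         return res_dict, None, None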

    def _auto_batching_generator(self, input_channel, op_name, batch_size,
                                 timeout, op_info_prefix):
        """
        Merge batch_size requests into one batch for a single prediction.
        Takes one piece of data from the input channel at a time until the
        batch reaches batch_size or the waiting time exceeds
        auto_batching_timeout.

        Args:
            input_channel: the input channel of this Op
            op_name: op name
            batch_size: batch size, less than worker_num
            timeout: batch timeout in seconds. If timeout is None and fewer
                items than batch_size have been taken from the front,
                fetching blocks.
            op_info_prefix: op link info.

        Yields:
            batch: a list of channeldata dicts.
        """
        while True:
            batch = []
            while len(batch) == 0:
                endtime = None
                if timeout is not None:
                    endtime = _time() + timeout
                for idx in range(batch_size):
                    try:
                        channeldata_dict = None
                        front_start_time = int(round(_time() * 1000000))
                        if timeout is not None:
                            remaining = endtime - _time()
                            if remaining <= 0.0:
                                _LOGGER.debug("{} Failed to generate batch: "
                                              "timeout".format(op_info_prefix))
                                break
                            channeldata_dict = input_channel.front(op_name,
                                                                   timeout)
                        else:
                            channeldata_dict = input_channel.front(op_name)
                        batch.append(channeldata_dict)
                        _LOGGER.debug(
                            "_auto_batching_generator get {} channeldata from op:{} input channel. time={}".
                            format(idx, op_name, front_start_time))
                    except ChannelTimeoutError:
                        _LOGGER.debug("{} Failed to generate batch: "
                                      "timeout".format(op_info_prefix))
                        break
            _LOGGER.debug("{} Got actual batch_size: {}".format(op_info_prefix,
                                                                len(batch)))
            yield batch

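    # Illustrative usage (argument values are hypothetical): the main loop of
    # _run() drains this generator; each next() returns a list of at most
    # batch_size channeldata dicts, waiting up to `timeout` seconds per batch.
    #
    #     batches = self._auto_batching_generator(
    #         input_channel, self.name, batch_size=8, timeout=0.01,
    #         op_info_prefix="[op|0]")
    #     batch = next(batches)  # list of 1..8 channeldata dicts
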
    def _parse_channeldata_batch(self, batch, output_channels):
        """
        Parse a batch of channeldata.

        Args:
            batch: a batch of channeldata produced by auto-batching
            output_channels: output channels

        Returns:
            parsed_data_dict: data parsed from each channeldata in the batch
            need_profile_dict: need-profile flags of the requests in the batch
            profile_dict: profile info of the requests in the batch
            logid_dict: log_id of each request, for tracing
        """
        parsed_data_dict = collections.OrderedDict()
        need_profile_dict = {}
        profile_dict = {}
        logid_dict = {}
        for channeldata_dict in batch:
            (data_id, error_channeldata, parsed_data,
                    client_need_profile, profile_set, log_id) = \
                            self._parse_channeldata(channeldata_dict)
            if error_channeldata is None:
                parsed_data_dict[data_id] = parsed_data
                need_profile_dict[data_id] = client_need_profile
                profile_dict[data_id] = profile_set
                logid_dict[data_id] = log_id
            else:
                # error data in predecessor Op
                # (error_channeldata with profile info)
                self._push_to_output_channels(error_channeldata,
                                              output_channels)

        return parsed_data_dict, need_profile_dict, profile_dict, logid_dict
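
    # Illustrative shapes of the four dicts returned above, all keyed by
    # data_id (the values shown are hypothetical):
    #
    #     parsed_data_dict  == {101: {"image": np.ndarray(...)}, 102: {...}}
    #     need_profile_dict == {101: False, 102: False}
    #     profile_dict      == {101: set(), 102: set()}
    #     logid_dict        == {101: 10001, 102: 10002}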

    def _run(self, concurrency_idx, input_channel, output_channels,
             is_thread_op, trace_buffer, model_config, workdir, thread_num,
             device_type, devices, mem_optim, ir_optim, precision, use_mkldnn,
             mkldnn_cache_capacity, mkldnn_op_list, mkldnn_bf16_op_list,
             is_jump_op, output_channels_of_jump_ops):
        """
        _run() is the entry function of the OP process / thread model. When the
        client type is local_predictor in process mode, the CUDA environment
        needs to be initialized by LocalServiceHandler in the child process;
        otherwise CUDA error(3), an initialization error, occurs. Preprocess,
        process and postprocess are executed in the main loop. The preprocess
        and postprocess functions are usually rewritten by users. Trace data is
        recorded by trace_que.

        Args:
            concurrency_idx: thread/process index
            input_channel: input channel, takes the data to be processed
            output_channels: output channels, store processed data
            is_thread_op: False, it's a process op; True, it's a thread op
            trace_buffer: stores trace information
            model_config: model config path
            workdir: work directory
            thread_num: number of threads, concurrent quantity
            device_type: support multiple devices
            devices: gpu id list[gpu], "" default[cpu]
            mem_optim: use memory/graphics memory optimization, True default.
            ir_optim: use computation graph (IR) optimization, False default.
            precision: inference precision, e.g. "fp32", "fp16", "int8", "bf16"
            use_mkldnn: use mkldnn, default False.
            mkldnn_cache_capacity: cache capacity of mkldnn, 0 means no limit.
            mkldnn_op_list: OP list optimized by mkldnn, None default.
            mkldnn_bf16_op_list: OP list optimized by mkldnn bf16, None default.
            is_jump_op: OP has jump op list or not, False default.
            output_channels_of_jump_ops: all output channels of jump ops.

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)

        # init ops
        profiler = None
        try:
            if is_thread_op == False and self.client_type == "local_predictor":
                self.service_handler = local_service_handler.LocalServiceHandler(
                    model_config=model_config,
                    client_type="local_predictor",
                    workdir=workdir,
                    thread_num=thread_num,
                    device_type=device_type,
                    devices=devices,
                    mem_optim=mem_optim,
                    ir_optim=ir_optim,
                    precision=precision,
                    use_mkldnn=use_mkldnn,
                    mkldnn_cache_capacity=mkldnn_cache_capacity,
                    mkldnn_op_list=mkldnn_op_list,
                    mkldnn_bf16_op_list=mkldnn_bf16_op_list)

                _LOGGER.info("Init cuda env in process {}".format(
                    concurrency_idx))
                self.local_predictor = self.service_handler.get_client(
                    concurrency_idx)
            # check all ops initialized successfully.
            profiler = self._initialize(is_thread_op, concurrency_idx)

        except Exception as e:
            _LOGGER.critical(
                "{} failed to init op: {}".format(op_info_prefix, e),
                exc_info=True)
            os._exit(-1)
        _LOGGER.info("{} Succ init".format(op_info_prefix))

        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=self._batch_size,
            timeout=self._auto_batching_timeout,
            op_info_prefix=op_info_prefix)

        start, end = None, None
        trace_que = collections.deque()
        while True:
            start = int(round(_time() * 1000000))
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            in_time = end - start
            _LOGGER.debug("op:{} in_time_end:{}".format(op_info_prefix,
                                                        time.time()))

            # parse channeldata batch
            try:
                parsed_data_dict, need_profile_dict, profile_dict, logid_dict\
                        = self._parse_channeldata_batch(
                                channeldata_dict_batch, output_channels)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(parsed_data_dict) == 0:
                # data in the whole batch is all error data
                continue
            _LOGGER.debug("op:{} parse_end:{}".format(op_info_prefix,
                                                      time.time()))

            # log the cost of popping each request from the input channel
            front_cost = int(round(_time() * 1000000)) - start
            for data_id, parsed_data in parsed_data_dict.items():
                _LOGGER.debug(
                    "(data_id={}) POP INPUT CHANNEL! op:{}, cost:{} ms".format(
                        data_id, self.name, front_cost / 1000.0))

            # preprocess
            start = profiler.record("prep#{}_0".format(op_info_prefix))
            preped_data_dict, err_channeldata_dict, skip_process_dict \
                    = self._run_preprocess(parsed_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("prep#{}_1".format(op_info_prefix))
            prep_time = end - start
            _LOGGER.debug("op:{} preprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), prep_time))
            try:
                # put error requests into output channel, skip process and postprocess stage
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(preped_data_dict) == 0:
                continue

            # process
            start = profiler.record("midp#{}_0".format(op_info_prefix))
            midped_data_dict, err_channeldata_dict \
                    = self._run_process(preped_data_dict, op_info_prefix, skip_process_dict, logid_dict)
            end = profiler.record("midp#{}_1".format(op_info_prefix))
            midp_time = end - start
            _LOGGER.debug("op:{} process_end:{}, cost:{}".format(
                op_info_prefix, time.time(), midp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(midped_data_dict) == 0:
                continue

            # postprocess
            start = profiler.record("postp#{}_0".format(op_info_prefix))
            postped_data_dict, err_channeldata_dict \
                    = self._run_postprocess(parsed_data_dict, midped_data_dict, op_info_prefix, logid_dict)
            end = profiler.record("postp#{}_1".format(op_info_prefix))
            postp_time = end - start
            after_postp_time = _time()
            _LOGGER.debug("op:{} postprocess_end:{}, cost:{}".format(
                op_info_prefix, time.time(), postp_time))
            try:
                for data_id, err_channeldata in err_channeldata_dict.items():
                    self._push_to_output_channels(
                        data=err_channeldata,
                        channels=output_channels,
                        client_need_profile=need_profile_dict[data_id],
                        profile_set=profile_dict[data_id])
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            if len(postped_data_dict) == 0:
                continue

            # push data to channel (if run succ)
            start = int(round(_time() * 1000000))
            try:
                profile_str = profiler.gen_profile_str()
                if self.is_jump_op() is True and self.check_jumping(
                        postped_data_dict) is True:
                    # push data to output channel of ops to be jumped
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels_of_jump_ops,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL OF JUMP OPs! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
                else:
                    # push data to output channel.
                    for data_id, postped_data in postped_data_dict.items():
                        if self._server_use_profile:
                            sys.stderr.write(profile_str)
                        self._push_to_output_channels(
                            data=postped_data,
                            channels=output_channels,
                            profile_str=profile_str,
                            client_need_profile=need_profile_dict[data_id],
                            profile_set=profile_dict[data_id])
                        after_outchannel_time = _time()
                        _LOGGER.debug(
                            "(data_id={}) PUSH OUTPUT CHANNEL! op:{} push cost:{} ms".
                            format(data_id, self.name, (after_outchannel_time -
                                                        after_postp_time) *
                                   1000))
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break
            end = int(round(_time() * 1000000))
            out_time = end - start
            after_outchannel_time = int(round(_time() * 1000000))
            if trace_buffer is not None:
                trace_que.append({
                    "name": self.name,
                    "actions": {
                        "in": in_time,
                        "prep": prep_time,
                        "midp": midp_time,
                        "postp": postp_time,
                        "out": out_time,
                    }
                })
                while trace_que:
                    info = trace_que[0]
                    try:
                        trace_buffer.put_nowait(info)
                        trace_que.popleft()
                    except Queue.Full:
                        break
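
    # Shape of one record appended to trace_que / trace_buffer above; the
    # stage durations are in microseconds (the values here are illustrative):
    #
    #     {"name": "uci", "actions": {"in": 120, "prep": 850,
    #                                 "midp": 15300, "postp": 640, "out": 95}}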

    def _initialize(self, is_thread_op, concurrency_idx):
        """
        Initialize one OP object in the target function of a thread or process.
        Initialize the client object with _client_config and _server_endpoints.
        Create a TimeProfiler per thread or process for recording profiler info.

        Args:
            is_thread_op: True, one op runs in one thread; False, one op runs
                in one process.
            concurrency_idx: process id; thread mode does not use this param.

        Returns:
            TimeProfiler
        """
        if is_thread_op:
            with self._for_init_op_lock:
                if not self._succ_init_op:
                    # for the threaded version of Op, each thread cannot get its concurrency_idx
                    self.concurrency_idx = None
                    # init client
                    self.client = self.init_client(self._client_config,
                                                   self._server_endpoints)
                    # user defined
                    self.init_op()
                    self._succ_init_op = True
                    self._succ_close_op = False
        else:
            self.concurrency_idx = concurrency_idx
            # init client
            self.client = self.init_client(self._client_config,
                                           self._server_endpoints)
            # user defined
            self.init_op()

        # use a separate TimeProfiler per thread or process
        profiler = TimeProfiler()
        profiler.enable(True)
        return profiler
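
    # A minimal sketch of how the returned profiler is used (mirroring the
    # calls in _run() above; the stage name "prep#[op|0]" is illustrative):
    #
    #     profiler = self._initialize(is_thread_op=True, concurrency_idx=0)
    #     start = profiler.record("prep#[op|0]_0")
    #     ...  # stage body
    #     end = profiler.record("prep#[op|0]_1")
    #     profile_str = profiler.gen_profile_str()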

    def _finalize(self, is_thread_op):
        if is_thread_op:
            with self._for_close_op_lock:
                if not self._succ_close_op:
                    self._profiler = None
                    self.client = None
                    self._succ_init_op = False
                    self._succ_close_op = True

    def _log(self, info):
        return "{} {}".format(self.name, info)


class RequestOp(Op):
    """
    RequestOp is a special Op, for unpacking one request package. If the
    request needs a special unpacking method, you need to inherit class
    RequestOp and rewrite the unpack_request_package function. Notice!!!
    Class RequestOp does not run preprocess, process or postprocess.
    """

    def __init__(self):
        """
        Initialize the RequestOp
        """
        # PipelineService.name = "@DAGExecutor"
        super(RequestOp, self).__init__(name="@DAGExecutor", input_ops=[])
        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical("Op(Request) Failed to init: {}".format(e))
            os._exit(-1)

    def proto_tensor_2_numpy(self, tensor):
        """
        Convert proto tensor to numpy array. The supported types are as follows:
                INT64
                FP32
                INT32
                FP64
                INT16
                FP16
                BF16
                UINT8
                INT8
                BOOL
                BYTES
        Unsupported types:
                STRING
                COMPLEX64
                COMPLEX128

        Args:
            tensor: one tensor in request.tensors.

        Returns:
            np_data: np.ndarray, the tensor data converted to numpy.
            lod_info: np.ndarray, lod info of the tensor data, None default.
        """
        if tensor is None or tensor.elem_type is None or tensor.name is None:
            _LOGGER.error("input params of tensor is wrong. tensor: {}".format(
                tensor))
            return None

        # Set dim shape
        dims = []
        if tensor.shape is None:
            dims.append(1)
        else:
            for one_dim in tensor.shape:
                dims.append(one_dim)

        # Set up 2-d lod tensor
        np_lod = None
        if len(tensor.lod) > 0:
            np_lod = np.array(tensor.lod).astype(int32).reshape(2, -1)

        np_data = None
        _LOGGER.info("proto_to_numpy, name:{}, type:{}, dims:{}".format(
            tensor.name, tensor.elem_type, dims))
        if tensor.elem_type == 0:
            # VarType: INT64
            np_data = np.array(tensor.int64_data).astype(int64).reshape(dims)
        elif tensor.elem_type == 1:
            # VarType: FP32
            np_data = np.array(tensor.float_data).astype(float32).reshape(dims)
        elif tensor.elem_type == 2:
            # VarType: INT32
            np_data = np.array(tensor.int_data).astype(int32).reshape(dims)
        elif tensor.elem_type == 3:
            # VarType: FP64
            np_data = np.array(tensor.float64_data).astype(float64).reshape(
                dims)
        elif tensor.elem_type == 4:
            # VarType: INT16
            np_data = np.array(tensor.int_data).astype(int16).reshape(dims)
        elif tensor.elem_type == 5:
            # VarType: FP16
            np_data = np.array(tensor.float_data).astype(float16).reshape(dims)
        elif tensor.elem_type == 6:
            # VarType: BF16
            np_data = np.array(tensor.uint32_data).astype(uint16).reshape(dims)
        elif tensor.elem_type == 7:
            # VarType: UINT8
            np_data = np.array(tensor.uint32_data).astype(uint8).reshape(dims)
        elif tensor.elem_type == 8:
            # VarType: INT8
            np_data = np.array(tensor.int_data).astype(int8).reshape(dims)
        elif tensor.elem_type == 9:
            # VarType: BOOL
            np_data = np.array(tensor.bool_data).astype(bool).reshape(dims)
        elif tensor.elem_type == 13:
            # VarType: BYTES
            byte_data = BytesIO(tensor.byte_data)
            np_data = np.load(byte_data, allow_pickle=True)
        else:
            _LOGGER.error("Sorry, the type {} of tensor {} is not supported.".
                          format(tensor.elem_type, tensor.name))
            raise ValueError(
                "Sorry, the type {} of tensor {} is not supported.".format(
                    tensor.elem_type, tensor.name))

        return np_data, np_lod
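
    # Illustrative only (elem_type values follow the mapping above; `tensor`
    # is assumed to be one element of request.tensors): an FP32 tensor of
    # shape (1, 3) round-trips to numpy like this.
    #
    #     tensor.name = "image"
    #     tensor.elem_type = 1                       # FP32
    #     tensor.shape.extend([1, 3])
    #     tensor.float_data.extend([0.1, 0.2, 0.3])
    #     np_data, np_lod = self.proto_tensor_2_numpy(tensor)
    #     # np_data.shape == (1, 3); np_lod is None since tensor.lod is unset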

    def unpack_request_package(self, request):
        """
        Unpack request package by gateway.proto

        Args:
            request: HTTP body, JSON format

        Returns:
            dict_data: json fields in HTTP body
            log_id: log_id
            prod_errcode: None or ProductErrCode.SUCC.value default; otherwise,
                          product errors occurred. It is handled in the same
                          way as an exception.
            prod_errinfo: "" default
        """
        dict_data = {}
        log_id = None
        if request is None:
            _LOGGER.critical("request is None")
            raise ValueError("request is None")

        # unpack key/value string list
        for idx, key in enumerate(request.key):
            dict_data[key] = request.value[idx]
        log_id = request.logid

        # unpack proto.tensors data.
        for one_tensor in request.tensors:
            name = one_tensor.name
            elem_type = one_tensor.elem_type

            if one_tensor.name is None:
                _LOGGER.error("Tensor name is None.")
                raise ValueError("Tensor name is None.")

            numpy_dtype = _TENSOR_DTYPE_2_NUMPY_DATA_DTYPE.get(elem_type)
            if numpy_dtype is None:
                _LOGGER.error(
                    "elem_type:{} is mismatched in unpack_request_package.".
                    format(elem_type))
                raise ValueError("elem_type:{} error".format(elem_type))

            if numpy_dtype == "string":
                new_string = ""
                if one_tensor.str_data is None:
                    _LOGGER.error(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                    raise ValueError(
                        "str_data of tensor:{} is None, elem_type is {}.".
                        format(name, elem_type))
                for one_str in one_tensor.str_data:
                    new_string += one_str

                dict_data[name] = new_string
            else:
                np_data, np_lod = self.proto_tensor_2_numpy(one_tensor)
                dict_data[name] = np_data
                if np_lod is not None:
                    dict_data[name + ".lod"] = np_lod

        _LOGGER.info("RequestOp unpack one request. log_id:{}, clientip:{} \
            name:{}, method:{}, time:{}"
                     .format(log_id, request.clientip, request.name,
                             request.method, time.time()))

        return dict_data, log_id, None, ""
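
    # Sketch of the default unpacking (all values are hypothetical): a request
    # with key == ["words"], value == ["hello"], logid == 10000 unpacks to:
    #
    #     dict_data == {"words": "hello"}
    #     return value == ({"words": "hello"}, 10000, None, "")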


class ResponseOp(Op):
    """
    ResponseOp is a special Op, for packing one response package. If the
    channeldata needs a special packing method, you need to inherit class
    ResponseOp and rewrite the pack_response_package function. Notice!!!
    Class ResponseOp does not run preprocess, process or postprocess.
    """

    def __init__(self, input_ops):
        """
        Initialize the ResponseOp
        """
        super(ResponseOp, self).__init__(
            name="@DAGExecutor", input_ops=input_ops)

        # init op
        try:
            self.init_op()
        except Exception as e:
            _LOGGER.critical(
                "Op(ResponseOp) Failed to init: {}".format(e), exc_info=True)
            os._exit(-1)

        # init ResponseOp
        self.is_pack_tensor = False

    def set_pack_format(self, isTensor=False):
        self.is_pack_tensor = isTensor

    def pack_response_package(self, channeldata):
        """
        Getting channeldata from the last channel, packing the response
        package serialized by protobuf.

        Args:
            channeldata: Type ChannelData

        Returns:
            resp: pipeline_service_pb2.Response()
        """
        resp = pipeline_service_pb2.Response()
        error_code = channeldata.error_code
        error_info = ""
        if error_code == ChannelDataErrcode.OK.value:
            # Framework level errors
            if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
                feed = channeldata.parse()
                # ndarray to string:
                # https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                np.set_printoptions(threshold=sys.maxsize)
                for name, var in feed.items():
                    resp.value.append(var.__repr__())
                    resp.key.append(name)
            elif channeldata.datatype == ChannelDataType.DICT.value:
                feed = channeldata.parse()
                for name, var in feed.items():
                    if not isinstance(var, str):
                        error_code = ChannelDataErrcode.TYPE_ERROR.value
                        error_info = self._log(
                            "fetch var type must be str({}).".format(
                                type(var)))
                        _LOGGER.error("(logid={}) Failed to pack RPC "
                                      "response package: {}".format(
                                          channeldata.id, error_info))
                        break
                    resp.value.append(var)
                    resp.key.append(name)
            else:
                error_code = ChannelDataErrcode.TYPE_ERROR.value
                error_info = self._log("error type({}) in datatype.".format(
                    channeldata.datatype))
                _LOGGER.error("(logid={}) Failed to pack RPC response"
                              " package: {}".format(channeldata.id, error_info))
        else:
            # Product level errors
            error_info = channeldata.error_info
            if error_code == ChannelDataErrcode.PRODUCT_ERROR.value:
                # rewrite error_code when product errors occurred
                error_code = channeldata.prod_error_code
                error_info = channeldata.prod_error_info

        # pack results
        if error_code is None:
            error_code = 0
        resp.err_no = error_code
        resp.err_msg = error_info

        return resp
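
    # Shape of a packed response (illustrative values): err_no/err_msg carry
    # the framework or product error state; key/value are parallel string
    # lists holding the fetch results.
    #
    #     resp.err_no == 0
    #     resp.err_msg == ""
    #     resp.key == ["label"], resp.value == ["9"]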


class VirtualOp(Op):
    """
    To connect two ops across levels in the dag view, we create virtual ops
    between non-virtual ops and transfer data only. For example,
    the pred ops of F are D & E. In the process of building the DAG, we
    create channels layer by layer according to dag views. Op F is not
    in the next layer view of [B, E], so we create a virtual op
    'V1' whose pred op is E. And so on, we create two virtual ops 'V2'
    and 'V3'. Finally, we reach the non-virtual op F. We create 4 channels
    among E, V1, V2, V3 and F; the producer of V1, V2, V3 and F is E.

        DAG: [A -> B -> C -> D -> F]
               \-> E ----------/

        DAG view: [[A], [B, E], [C], [D], [F]]
        BUILD DAG: [A -> B -> C -> D -> E -> F]
                     \-> E -> V1-> V2-> V3/
    """

    def __init__(self, name, concurrency=1):
        super(VirtualOp, self).__init__(
            name=name, input_ops=None, concurrency=concurrency)
        self._virtual_pred_ops = []

    def add_virtual_pred_op(self, op):
        """
        Add a front op of the current virtual op.

        Args:
            op: one op object, may be a virtual op or not.

        Returns:
            None
        """
        self._virtual_pred_ops.append(op)

    def _actual_pred_op_names(self, op):
        """
        Recursively find the front ops which are non-virtual ops.

        Args:
            op: one op object

        Returns:
            names: the names of non-virtual pred ops.
        """
        # can use disjoint-set, but it's not necessary
        if not isinstance(op, VirtualOp):
            return [op.name]
        names = []
        for x in op._virtual_pred_ops:
            names.extend(self._actual_pred_op_names(x))
        return names

    def add_output_channel(self, channel):
        """
        Add the output channel of non-virtual pred ops.

        Args:
            channel: one channel.

        Returns:
            None.
        """
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            _LOGGER.critical(
                self._log("Failed to add output_channel: output_channel"
                          " must be Channel type, not {}".format(
                              type(channel))))
            os._exit(-1)
        for op in self._virtual_pred_ops:
            for op_name in self._actual_pred_op_names(op):
                channel.add_producer(op_name)
        self._outputs.append(channel)
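
    # Illustrative wiring (op names follow the class docstring above; op_E and
    # channel are hypothetical objects): a virtual op only relays data, so the
    # channel's registered producer is the actual pred op E, not V1.
    #
    #     v1 = VirtualOp(name="V1")
    #     v1.add_virtual_pred_op(op_E)      # E is a non-virtual Op
    #     v1.add_output_channel(channel)    # internally: channel.add_producer("E")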

    def _run(self, concurrency_idx, input_channel, output_channels, client_type,
             is_thread_op):
        """
        The target function _run() only transfers data between OPs in one thread
        or process.

        Args:
            concurrency_idx: process id, not available in thread mode.
            input_channel: input channel
            output_channels: output channels
            client_type: no use
            is_thread_op: True, thread mode; False, process mode

        Returns:
            None
        """
        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        tid = threading.current_thread().ident

        batch_generator = self._auto_batching_generator(
            input_channel=input_channel,
            op_name=self.name,
            batch_size=1,
            timeout=None,
            op_info_prefix=op_info_prefix)

        while True:
            try:
                channeldata_dict_batch = next(batch_generator)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break

            try:
                for channeldata_dict in channeldata_dict_batch:
                    for name, data in channeldata_dict.items():
                        self._push_to_output_channels(
                            data, channels=output_channels, name=name)
            except ChannelStopError:
                _LOGGER.debug("{} Stop.".format(op_info_prefix))
                self._finalize(is_thread_op)
                break