#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import threading
import multiprocessing
from paddle_serving_client import MultiLangClient, Client
from concurrent import futures
import logging
import func_timeout
from numpy import *  # wildcard import: numpy names may be needed by eval() in RequestOp.unpack_request_package

from .proto import pipeline_service_pb2
from .channel import (ThreadChannel, ProcessChannel, ChannelDataEcode,
                      ChannelData, ChannelDataType)
from .util import NameGenerator

_LOGGER = logging.getLogger(__name__)
_op_name_gen = NameGenerator("Op")


class Op(object):
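    """Base operator of a pipeline.

    An Op pulls ChannelData from its input channel, runs
    preprocess -> process -> postprocess, and pushes the result to its
    output channels. When server_endpoints is non-empty, process() sends
    the data to a Paddle Serving backend through a brpc or grpc client.

    A minimal sketch of a custom Op (all names below are illustrative):

        class MyOp(Op):
            def preprocess(self, input_dicts):
                (_, input_dict), = input_dicts.items()
                return input_dict

            def postprocess(self, fetch_dict):
                return fetch_dict

        my_op = MyOp(name="my_op", input_ops=[read_op],
                     server_endpoints=["127.0.0.1:9292"],
                     fetch_list=["prediction"],
                     client_config="serving_client_conf.prototxt",
                     concurrency=2)
    """
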
    def __init__(self,
                 name=None,
                 input_ops=[],
                 server_endpoints=[],
                 fetch_list=[],
                 client_config=None,
                 concurrency=1,
                 timeout=-1,
                 retry=1):
        if name is None:
            name = _op_name_gen.next()
        self._is_run = False
        self.name = name  # identifies the type of Op; it must be globally unique
        self.concurrency = concurrency  # number of concurrent workers
        self.set_input_ops(input_ops)

        self._server_endpoints = server_endpoints
        self.with_serving = False
        if len(self._server_endpoints) != 0:
            self.with_serving = True
        self._client_config = client_config
        self._fetch_names = fetch_list

        self._timeout = timeout
        self._retry = max(1, retry)
        self._input = None
        self._outputs = []
        self._profiler = None

    def init_profiler(self, profiler):
        self._profiler = profiler

    def _profiler_record(self, string):
        if self._profiler is None:
            return
        self._profiler.record(string)

    def init_client(self, client_type, client_config, server_endpoints,
                    fetch_names):
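        """Create and connect the inner serving client (brpc or grpc).

        Only takes effect when this Op is backed by serving endpoints
        (with_serving is True); otherwise it returns immediately.
        """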
        if not self.with_serving:
            _LOGGER.debug("{} no client".format(self.name))
            return
        _LOGGER.debug("{} client_config: {}".format(self.name, client_config))
        _LOGGER.debug("{} fetch_names: {}".format(self.name, fetch_names))
        if client_type == 'brpc':
            self._client = Client()
            self._client.load_client_config(client_config)
        elif client_type == 'grpc':
            self._client = MultiLangClient()
        else:
            raise ValueError("unknown client type: {}".format(client_type))
        self._client.connect(server_endpoints)
        self._fetch_names = fetch_names

    def _get_input_channel(self):
        return self._input

    def get_input_ops(self):
        return self._input_ops

    def set_input_ops(self, ops):
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]
        self._input_ops = []
        for op in ops:
            if not isinstance(op, Op):
                raise TypeError(
                    self._log('input op must be Op type, not {}'.format(
                        type(op))))
            self._input_ops.append(op)

    def add_input_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('input channel must be Channel type, not {}'.format(
                    type(channel))))
        channel.add_consumer(self.name)
        self._input = channel

    def _get_output_channels(self):
        return self._outputs

    def add_output_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('output channel must be Channel type, not {}'.format(
                    type(channel))))
        channel.add_producer(self.name)
        self._outputs.append(channel)

    def preprocess(self, input_dicts):
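        """Default preprocess: pass through the parsed data of the single predecessor Op."""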
        # the default implementation only supports a single predecessor Op
        if len(input_dicts) != 1:
            raise NotImplementedError(
                'this Op has multiple predecessor Ops, please override this method.'
            )

        (_, input_dict), = input_dicts.items()
        return input_dict

    def process(self, feed_dict):
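        """Default process: send feed_dict to the serving client and return the fetch result.

        feed_dict must already contain numpy data; otherwise preprocess() has
        to be overridden to produce it.
        """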
        err, err_info = ChannelData.check_npdata(feed_dict)
        if err != 0:
            raise NotImplementedError(
                "{} Please override preprocess func.".format(err_info))
        _LOGGER.debug(self._log('feed_dict: {}'.format(feed_dict)))
        _LOGGER.debug(self._log('fetch: {}'.format(self._fetch_names)))
        call_result = self._client.predict(
            feed=feed_dict, fetch=self._fetch_names)
        _LOGGER.debug(self._log("get call_result"))
        return call_result

    def postprocess(self, fetch_dict):
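        """Default postprocess: return the fetch result unchanged. Overrides must return a dict."""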
        return fetch_dict

    def stop(self):
        self._is_run = False

    def _parse_channeldata(self, channeldata_dict):
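        """Split a channeldata dict into (data_id, first error found, parsed data per Op)."""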
        data_id, error_channeldata = None, None
        parsed_data = {}

        key = list(channeldata_dict.keys())[0]
        data_id = channeldata_dict[key].id

        for name, data in channeldata_dict.items():
            if data.ecode != ChannelDataEcode.OK.value:
                error_channeldata = data
                break
            parsed_data[name] = data.parse()
        return data_id, error_channeldata, parsed_data

    def _push_to_output_channels(self, data, channels, name=None):
        if name is None:
            name = self.name
        for channel in channels:
            channel.push(data, name)

    def start_with_process(self, client_type):
        processes = []
        for concurrency_idx in range(self.concurrency):
            p = multiprocessing.Process(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), client_type))
            p.start()
            processes.append(p)
        return processes

    def start_with_thread(self, client_type):
        threads = []
        for concurrency_idx in range(self.concurrency):
            t = threading.Thread(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), client_type))
            t.start()
            threads.append(t)
        return threads

    def load_user_resources(self):
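        """Hook for loading user-defined resources; override in subclasses if needed."""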
        pass

    def _run_preprocess(self, parsed_data, data_id, log_func):
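        """Run preprocess() and convert any exception into an error ChannelData."""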
        preped_data, error_channeldata = None, None
        try:
            preped_data = self.preprocess(parsed_data)
        except NotImplementedError as e:
            # preprocess function not implemented
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.NOT_IMPLEMENTED.value,
                error_info=error_info,
                data_id=data_id)
        except TypeError as e:
            # Error type in channeldata.datatype
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.TYPE_ERROR.value,
                error_info=error_info,
                data_id=data_id)
        except Exception as e:
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
        return preped_data, error_channeldata

    def _run_process(self, preped_data, data_id, log_func):
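        """Run process() with optional timeout and retry; convert failures into an error ChannelData."""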
        midped_data, error_channeldata = None, None
        if self.with_serving:
            ecode = ChannelDataEcode.OK.value
            if self._timeout <= 0:
                try:
                    midped_data = self.process(preped_data)
                except Exception as e:
                    ecode = ChannelDataEcode.UNKNOW.value
                    error_info = log_func(e)
                    _LOGGER.error(error_info)
            else:
                for i in range(self._retry):
                    try:
                        midped_data = func_timeout.func_timeout(
                            self._timeout, self.process, args=(preped_data, ))
                    except func_timeout.FunctionTimedOut as e:
                        if i + 1 >= self._retry:
                            ecode = ChannelDataEcode.TIMEOUT.value
                            error_info = log_func(e)
                            _LOGGER.error(error_info)
                        else:
                            _LOGGER.warning(
                                log_func("timeout, retry({})".format(i + 1)))
                    except Exception as e:
                        ecode = ChannelDataEcode.UNKNOW.value
                        error_info = log_func(e)
                        _LOGGER.error(error_info)
                        break
                    else:
                        break
            if ecode != ChannelDataEcode.OK.value:
                error_channeldata = ChannelData(
                    ecode=ecode, error_info=error_info, data_id=data_id)
            elif midped_data is None:
                # op client return None
                error_channeldata = ChannelData(
                    ecode=ChannelDataEcode.CLIENT_ERROR.value,
                    error_info=log_func(
                        "predict failed, please check the server side."),
                    data_id=data_id)
        else:
            midped_data = preped_data
        return midped_data, error_channeldata

    def _run_postprocess(self, midped_data, data_id, log_func):
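        """Run postprocess() and wrap the resulting dict into NPDATA or DICT ChannelData."""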
        output_data, error_channeldata = None, None
        try:
            postped_data = self.postprocess(midped_data)
        except Exception as e:
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
            return output_data, error_channeldata

        if not isinstance(postped_data, dict):
            error_info = log_func("output of postprocess function must be " \
                    "dict type, but got {}".format(type(postped_data)))
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
            return output_data, error_channeldata

        err, _ = ChannelData.check_npdata(postped_data)
        if err == 0:
            output_data = ChannelData(
                ChannelDataType.CHANNEL_NPDATA.value,
                npdata=postped_data,
                data_id=data_id)
        else:
            output_data = ChannelData(
                ChannelDataType.DICT.value,
                dictdata=postped_data,
                data_id=data_id)
        return output_data, error_channeldata

    def _run(self, concurrency_idx, input_channel, output_channels,
             client_type):
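        """Worker loop: create the client, load resources, then repeatedly pull
        data from the input channel, run the three stages and push the result
        (or an error) to the output channels.
        """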
        def get_log_func(op_info_prefix):
            def log_func(info_str):
                return "{} {}".format(op_info_prefix, info_str)

            return log_func

        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        log = get_log_func(op_info_prefix)
        tid = threading.current_thread().ident

        # create client based on client_type
        self.init_client(client_type, self._client_config,
                         self._server_endpoints, self._fetch_names)

        # load user resources
        self.load_user_resources()

        self._is_run = True
        while self._is_run:
            self._profiler_record("{}-get#{}_0".format(op_info_prefix, tid))
            channeldata_dict = input_channel.front(self.name)
            self._profiler_record("{}-get#{}_1".format(op_info_prefix, tid))
            _LOGGER.debug(log("input_data: {}".format(channeldata_dict)))

            data_id, error_channeldata, parsed_data = self._parse_channeldata(
                channeldata_dict)
            # error data in predecessor Op
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # preprocess
            self._profiler_record("{}-prep#{}_0".format(op_info_prefix, tid))
            preped_data, error_channeldata = self._run_preprocess(parsed_data,
                                                                  data_id, log)
            self._profiler_record("{}-prep#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # process
            self._profiler_record("{}-midp#{}_0".format(op_info_prefix, tid))
            midped_data, error_channeldata = self._run_process(preped_data,
                                                               data_id, log)
            self._profiler_record("{}-midp#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # postprocess
            self._profiler_record("{}-postp#{}_0".format(op_info_prefix, tid))
            output_data, error_channeldata = self._run_postprocess(midped_data,
                                                                   data_id, log)
            self._profiler_record("{}-postp#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # push data to the output channels (if the run succeeded)
            self._profiler_record("{}-push#{}_0".format(op_info_prefix, tid))
            self._push_to_output_channels(output_data, output_channels)
            self._profiler_record("{}-push#{}_1".format(op_info_prefix, tid))

    def _log(self, info):
        return "{} {}".format(self.name, info)


class RequestOp(Op):
    """ RequestOp does not run preprocess, process, or postprocess. """

    def __init__(self, concurrency=1):
        # PipelineService.name = "#G"
        super(RequestOp, self).__init__(
            name="#G", input_ops=[], concurrency=concurrency)
        # load user resources
        self.load_user_resources()

    def unpack_request_package(self, request):
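        """Unpack the request key/value pairs into a dict, eval()-ing each value when possible."""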
        dictdata = {}
        for idx, key in enumerate(request.key):
            data = request.value[idx]
            try:
                data = eval(data)
            except Exception:
                # keep the raw string if it cannot be evaluated
                pass
            dictdata[key] = data
        return dictdata


class ResponseOp(Op):
    """ ResponseOp does not run preprocess, process, or postprocess. """

    def __init__(self, input_ops, concurrency=1):
        super(ResponseOp, self).__init__(
            name="#R", input_ops=input_ops, concurrency=concurrency)
        # load user resources
        self.load_user_resources()

    def pack_response_package(self, channeldata):
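        """Pack ChannelData into a pipeline_service_pb2.Response message."""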
        resp = pipeline_service_pb2.Response()
        resp.ecode = channeldata.ecode
        if resp.ecode == ChannelDataEcode.OK.value:
            if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
                feed = channeldata.parse()
                # ndarray to string:
                # https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                for name, var in feed.items():
                    resp.value.append(repr(var))
                    resp.key.append(name)
            elif channeldata.datatype == ChannelDataType.DICT.value:
                feed = channeldata.parse()
                for name, var in feed.items():
                    if not isinstance(var, str):
                        resp.ecode = ChannelDataEcode.TYPE_ERROR.value
                        resp.error_info = self._log(
                            "fetch var type must be str({}).".format(
                                type(var)))
                        break
                    resp.value.append(var)
                    resp.key.append(name)
            else:
                resp.ecode = ChannelDataEcode.TYPE_ERROR.value
                resp.error_info = self._log(
                    "Error type({}) in datatype.".format(channeldata.datatype))
                _LOGGER.error(resp.error_info)
        else:
            resp.error_info = channeldata.error_info
        return resp


class VirtualOp(Op):
    """ For connecting two channels; data is forwarded without any processing. """

    def __init__(self, name, concurrency=1):
        super(VirtualOp, self).__init__(
            name=name, input_ops=None, concurrency=concurrency)
        self._virtual_pred_ops = []

    def add_virtual_pred_op(self, op):
        self._virtual_pred_ops.append(op)

    def add_output_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('output channel must be Channel type, not {}'.format(
                    type(channel))))
        for op in self._virtual_pred_ops:
            channel.add_producer(op.name)
        self._outputs.append(channel)

    def _run(self, concurrency_idx, input_channel, output_channels,
             client_type):
        def get_log_func(op_info_prefix):
            def log_func(info_str):
                return "{} {}".format(op_info_prefix, info_str)

            return log_func

        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        log = get_log_func(op_info_prefix)
        tid = threading.current_thread().ident

        self._is_run = True
        while self._is_run:
            self._profiler_record("{}-get#{}_0".format(op_info_prefix, tid))
            channeldata_dict = input_channel.front(self.name)
            self._profiler_record("{}-get#{}_1".format(op_info_prefix, tid))

            self._profiler_record("{}-push#{}_0".format(op_info_prefix, tid))
            for name, data in channeldata_dict.items():
                self._push_to_output_channels(
                    data, channels=output_channels, name=name)
            self._profiler_record("{}-push#{}_1".format(op_info_prefix, tid))