#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing

import threading
import multiprocessing
from paddle_serving_client import MultiLangClient, Client
from concurrent import futures
import logging
import func_timeout

from .proto import pipeline_service_pb2
from .channel import ThreadChannel, ProcessChannel, ChannelDataEcode, ChannelData, ChannelDataType
from .util import NameGenerator

_LOGGER = logging.getLogger(__name__)
_op_name_gen = NameGenerator("Op")


class Op(object):
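    """Base class for operators (Ops) in a pipeline service.

    Each worker of an Op pulls ChannelData from its input channel, runs
    preprocess -> process -> postprocess, and pushes the result to its
    output channels. When server_endpoints is set, process() sends the data
    to a Paddle Serving backend through a brpc or gRPC client; otherwise the
    preprocessed data is passed through unchanged.

    Minimal usage sketch (the op name, endpoint, client config path and
    fetch name below are placeholders, not part of this module):

        class ExampleOp(Op):
            def postprocess(self, fetch_dict):
                # keep only the field we care about
                return {"score": fetch_dict["score"]}

        read_op = RequestOp()
        example_op = ExampleOp(
            name="example",
            input_ops=[read_op],
            server_endpoints=["127.0.0.1:9292"],
            fetch_list=["score"],
            client_config="serving_client_conf.prototxt",
            concurrency=1)
    """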
    def __init__(self,
                 name=None,
                 input_ops=[],
                 server_endpoints=[],
                 fetch_list=[],
                 client_config=None,
                 concurrency=1,
                 timeout=-1,
                 retry=1):
        if name is None:
            name = _op_name_gen.next()
        self._is_run = False
        self.name = name  # identifies the type of Op; must be globally unique
        self.concurrency = concurrency  # number of concurrent workers
        self.set_input_ops(input_ops)

        self._server_endpoints = server_endpoints
        self.with_serving = False
        if len(self._server_endpoints) != 0:
            self.with_serving = True
        self._client_config = client_config
        self._fetch_names = fetch_list

        self._timeout = timeout
        self._retry = max(1, retry)
        self._input = None
        self._outputs = []
        self._profiler = None

    def init_profiler(self, profiler):
        self._profiler = profiler

    def _profiler_record(self, string):
        if self._profiler is None:
            return
        self._profiler.record(string)

    def init_client(self, client_type, client_config, server_endpoints,
                    fetch_names):
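        """Create and connect the serving client for this Op.

        Depending on client_type, a brpc Client or a gRPC MultiLangClient is
        created, configured with client_config and connected to
        server_endpoints. Skipped when the Op has no serving backend.
        """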
        if not self.with_serving:
            _LOGGER.debug("{} no client".format(self.name))
            return
        _LOGGER.debug("{} client_config: {}".format(self.name, client_config))
        _LOGGER.debug("{} fetch_names: {}".format(self.name, fetch_names))
        if client_type == 'brpc':
            self._client = Client()
            self._client.load_client_config(client_config)
        elif client_type == 'grpc':
            self._client = MultiLangClient()
        else:
            raise ValueError("unknown client type: {}".format(client_type))
        self._client.connect(server_endpoints)
        self._fetch_names = fetch_names

    def _get_input_channel(self):
        return self._input

    def get_input_ops(self):
        return self._input_ops

    def set_input_ops(self, ops):
        if not isinstance(ops, list):
            ops = [] if ops is None else [ops]
        self._input_ops = []
        for op in ops:
            if not isinstance(op, Op):
                raise TypeError(
                    self._log('input op must be Op type, not {}'.format(
                        type(op))))
            self._input_ops.append(op)

    def add_input_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('input channel must be Channel type, not {}'.format(
                    type(channel))))
        channel.add_consumer(self.name)
        self._input = channel

    def _get_output_channels(self):
        return self._outputs

    def add_output_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('output channel must be Channel type, not {}'.format(
                    type(channel))))
        channel.add_producer(self.name)
        self._outputs.append(channel)

    def preprocess(self, input_dicts):
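        """Default preprocess: pass through the parsed data of the single
        predecessor Op. Ops with multiple predecessors must override this
        method to merge their inputs.
        """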
        # the default implementation only supports a single predecessor Op
        if len(input_dicts) != 1:
            raise NotImplementedError(
                'this Op has multiple previous inputs. Please override this func.'
            )

        (_, input_dict), = input_dicts.items()
        return input_dict

    def process(self, feed_dict):
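        """Send feed_dict to the serving backend and return the fetch
        result. feed_dict must contain numpy-compatible data; otherwise
        preprocess() has to be overridden to produce it.
        """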
        err, err_info = ChannelData.check_npdata(feed_dict)
        if err != 0:
            raise NotImplementedError(
                "{} Please override preprocess func.".format(err_info))
        _LOGGER.debug(self._log('feed_dict: {}'.format(feed_dict)))
        _LOGGER.debug(self._log('fetch: {}'.format(self._fetch_names)))
        if isinstance(self._client, MultiLangClient):
            call_result = self._client.predict(
                feed=feed_dict, fetch=self._fetch_names)
            _LOGGER.debug(self._log("get call_result"))
        else:
            call_result = self._client.predict(
                feed=feed_dict, fetch=self._fetch_names)
            _LOGGER.debug(self._log("get fetch_dict"))
        return call_result

    def postprocess(self, fetch_dict):
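        """Default postprocess: return the fetch result unchanged. An
        override must return a dict.
        """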
        return fetch_dict

    def stop(self):
        self._input.stop()
        for channel in self._outputs:
            channel.stop()
        self._is_run = False

    def _parse_channeldata(self, channeldata_dict):
        data_id, error_channeldata = None, None
        parsed_data = {}

        key = list(channeldata_dict.keys())[0]
        data_id = channeldata_dict[key].id

        for name, data in channeldata_dict.items():
            if data.ecode != ChannelDataEcode.OK.value:
                error_channeldata = data
                break
            parsed_data[name] = data.parse()
        return data_id, error_channeldata, parsed_data

    def _push_to_output_channels(self, data, channels, name=None):
        if name is None:
            name = self.name
        for channel in channels:
            channel.push(data, name)

    def start_with_process(self, client_type):
        processes = []
        for concurrency_idx in range(self.concurrency):
            p = multiprocessing.Process(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), client_type))
            p.start()
            processes.append(p)
        return processes

    def start_with_thread(self, client_type):
        threads = []
        for concurrency_idx in range(self.concurrency):
            t = threading.Thread(
                target=self._run,
                args=(concurrency_idx, self._get_input_channel(),
                      self._get_output_channels(), client_type))
            t.start()
            threads.append(t)
        return threads

    def load_user_resources(self):
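        """Hook for loading user-defined resources before an Op starts
        processing; override as needed.
        """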
        pass

    def _run_preprocess(self, parsed_data, data_id, log_func):
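        """Run preprocess() and map any exception to an error ChannelData
        with the matching error code.
        """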
        preped_data, error_channeldata = None, None
        try:
            preped_data = self.preprocess(parsed_data)
        except NotImplementedError as e:
            # preprocess function not implemented
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.NOT_IMPLEMENTED.value,
                error_info=error_info,
                data_id=data_id)
        except TypeError as e:
            # Error type in channeldata.datatype
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.TYPE_ERROR.value,
                error_info=error_info,
                data_id=data_id)
        except Exception as e:
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
        return preped_data, error_channeldata

    def _run_process(self, preped_data, data_id, log_func):
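        """Run process() (the serving call) with optional timeout and
        retry; return the result or an error ChannelData.
        """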
        midped_data, error_channeldata = None, None
        if self.with_serving:
            ecode = ChannelDataEcode.OK.value
            if self._timeout <= 0:
                try:
                    midped_data = self.process(preped_data)
                except Exception as e:
                    ecode = ChannelDataEcode.UNKNOW.value
                    error_info = log_func(e)
                    _LOGGER.error(error_info)
            else:
                for i in range(self._retry):
                    try:
                        midped_data = func_timeout.func_timeout(
                            self._timeout, self.process, args=(preped_data, ))
                    except func_timeout.FunctionTimedOut as e:
                        if i + 1 >= self._retry:
                            ecode = ChannelDataEcode.TIMEOUT.value
                            error_info = log_func(e)
                            _LOGGER.error(error_info)
                        else:
                            _LOGGER.warning(
                                log_func("timeout, retry({})".format(i + 1)))
                    except Exception as e:
                        ecode = ChannelDataEcode.UNKNOW.value
                        error_info = log_func(e)
                        _LOGGER.error(error_info)
                        break
                    else:
                        break
            if ecode != ChannelDataEcode.OK.value:
                error_channeldata = ChannelData(
                    ecode=ecode, error_info=error_info, data_id=data_id)
            elif midped_data is None:
                # the Op's client returned None
                error_channeldata = ChannelData(
                    ecode=ChannelDataEcode.CLIENT_ERROR.value,
                    error_info=log_func(
                        "predict failed. pls check the server side."),
                    data_id=data_id)
        else:
            midped_data = preped_data
        return midped_data, error_channeldata

    def _run_postprocess(self, midped_data, data_id, log_func):
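        """Run postprocess(), check that it returns a dict, and wrap the
        result into a ChannelData (numpy or dict payload).
        """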
        output_data, error_channeldata = None, None
        try:
            postped_data = self.postprocess(midped_data)
        except Exception as e:
            error_info = log_func(e)
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
            return output_data, error_channeldata

        if not isinstance(postped_data, dict):
            error_info = log_func("output of postprocess function must be " \
                    "dict type, but got {}".format(type(postped_data)))
            _LOGGER.error(error_info)
            error_channeldata = ChannelData(
                ecode=ChannelDataEcode.UNKNOW.value,
                error_info=error_info,
                data_id=data_id)
            return output_data, error_channeldata

        err, _ = ChannelData.check_npdata(postped_data)
        if err == 0:
            output_data = ChannelData(
                ChannelDataType.CHANNEL_NPDATA.value,
                npdata=postped_data,
                data_id=data_id)
        else:
            output_data = ChannelData(
                ChannelDataType.DICT.value,
                dictdata=postped_data,
                data_id=data_id)
        return output_data, error_channeldata

    def _run(self, concurrency_idx, input_channel, output_channels,
             client_type):
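        """Worker loop executed by each thread or process: create the
        client, load user resources, then repeatedly pull data from the
        input channel, run preprocess/process/postprocess, and push the
        result (or error data) to the output channels.
        """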
        def get_log_func(op_info_prefix):
            def log_func(info_str):
                return "{} {}".format(op_info_prefix, info_str)

            return log_func

        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        log = get_log_func(op_info_prefix)
        tid = threading.current_thread().ident

        # create client based on client_type
        self.init_client(client_type, self._client_config,
                         self._server_endpoints, self._fetch_names)

        # load user resources
        self.load_user_resources()

        self._is_run = True
        while self._is_run:
            self._profiler_record("{}-get#{}_0".format(op_info_prefix, tid))
            channeldata_dict = input_channel.front(self.name)
            self._profiler_record("{}-get#{}_1".format(op_info_prefix, tid))
            _LOGGER.debug(log("input_data: {}".format(channeldata_dict)))

            data_id, error_channeldata, parsed_data = self._parse_channeldata(
                channeldata_dict)
            # error data in predecessor Op
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # preprocess
            self._profiler_record("{}-prep#{}_0".format(op_info_prefix, tid))
            preped_data, error_channeldata = self._run_preprocess(parsed_data,
                                                                  data_id, log)
            self._profiler_record("{}-prep#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # process
            self._profiler_record("{}-midp#{}_0".format(op_info_prefix, tid))
            midped_data, error_channeldata = self._run_process(preped_data,
                                                               data_id, log)
            self._profiler_record("{}-midp#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # postprocess
            self._profiler_record("{}-postp#{}_0".format(op_info_prefix, tid))
            output_data, error_channeldata = self._run_postprocess(midped_data,
                                                                   data_id, log)
            self._profiler_record("{}-postp#{}_1".format(op_info_prefix, tid))
            if error_channeldata is not None:
                self._push_to_output_channels(error_channeldata,
                                              output_channels)
                continue

            # push data to output channels (if the run succeeded)
            self._profiler_record("{}-push#{}_0".format(op_info_prefix, tid))
            self._push_to_output_channels(output_data, output_channels)
            self._profiler_record("{}-push#{}_1".format(op_info_prefix, tid))

    def _log(self, info):
        return "{} {}".format(self.name, info)


class RequestOp(Op):
    """ RequestOp do not run preprocess, process, postprocess. """

    def __init__(self, concurrency=1):
        # PipelineService.name = "#G"
        super(RequestOp, self).__init__(
            name="#G", input_ops=[], concurrency=concurrency)
        # load user resources
        self.load_user_resources()

    def unpack_request_package(self, request):
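        """Convert the parallel key/value lists of the request into a
        dict.
        """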
        dictdata = {}
        for idx, key in enumerate(request.key):
            dictdata[key] = request.value[idx]
        return dictdata


class ResponseOp(Op):
    """ ResponseOp do not run preprocess, process, postprocess. """

    def __init__(self, input_ops, concurrency=1):
        super(ResponseOp, self).__init__(
            name="#R", input_ops=input_ops, concurrency=concurrency)
        # load user resources
        self.load_user_resources()

    def pack_response_package(self, channeldata):
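        """Build a pipeline_service_pb2.Response from the final
        ChannelData; numpy arrays are serialized with repr().
        """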
        resp = pipeline_service_pb2.Response()
        resp.ecode = channeldata.ecode
        if resp.ecode == ChannelDataEcode.OK.value:
            if channeldata.datatype == ChannelDataType.CHANNEL_NPDATA.value:
                feed = channeldata.parse()
                # ndarray to string:
                # https://stackoverflow.com/questions/30167538/convert-a-numpy-ndarray-to-stringor-bytes-and-convert-it-back-to-numpy-ndarray
                for name, var in feed.items():
                    resp.value.append(var.__repr__())
                    resp.key.append(name)
            elif channeldata.datatype == ChannelDataType.DICT.value:
                feed = channeldata.parse()
                for name, var in feed.items():
                    if not isinstance(var, str):
                        resp.ecode = ChannelDataEcode.TYPE_ERROR.value
                        resp.error_info = self._log(
                            "fetch var type must be str({}).".format(
                                type(var)))
                        break
                    resp.value.append(var)
                    resp.key.append(name)
            else:
                resp.ecode = ChannelDataEcode.TYPE_ERROR.value
                resp.error_info = self._log(
                    "Error type({}) in datatype.".format(channeldata.datatype))
                _LOGGER.error(resp.error_info)
        else:
            resp.error_info = channeldata.error_info
        return resp


class VirtualOp(Op):
    """ For connecting two channels. """

    def __init__(self, name, concurrency=1):
        super(VirtualOp, self).__init__(
            name=name, input_ops=None, concurrency=concurrency)
        self._virtual_pred_ops = []

    def add_virtual_pred_op(self, op):
        self._virtual_pred_ops.append(op)

    def add_output_channel(self, channel):
        if not isinstance(channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('output channel must be Channel type, not {}'.format(
                    type(channel))))
        for op in self._virtual_pred_ops:
            channel.add_producer(op.name)
        self._outputs.append(channel)

    def _run(self, concurrency_idx, input_channel, output_channels,
             client_type):
        def get_log_func(op_info_prefix):
            def log_func(info_str):
                return "{} {}".format(op_info_prefix, info_str)

            return log_func

        op_info_prefix = "[{}|{}]".format(self.name, concurrency_idx)
        log = get_log_func(op_info_prefix)
        tid = threading.current_thread().ident

        self._is_run = True
        while self._is_run:
            self._profiler_record("{}-get#{}_0".format(op_info_prefix, tid))
            channeldata_dict = input_channel.front(self.name)
            self._profiler_record("{}-get#{}_1".format(op_info_prefix, tid))

            self._profiler_record("{}-push#{}_0".format(op_info_prefix, tid))
            for name, data in channeldata_dict.items():
                self._push_to_output_channels(
                    data, channels=output_channels, name=name)
            self._profiler_record("{}-push#{}_1".format(op_info_prefix, tid))