dag.py
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=doc-string-missing
import threading
import multiprocessing
import sys
import copy
if sys.version_info.major == 2:
    import Queue
elif sys.version_info.major == 3:
    import queue as Queue
else:
    raise Exception("Unsupported Python version")
import os
import logging

from .operator import Op, RequestOp, ResponseOp, VirtualOp
from .channel import (ThreadChannel, ProcessChannel, ChannelData,
                      ChannelDataEcode, ChannelDataType)
from .profiler import TimeProfiler
from .util import NameGenerator

_LOGGER = logging.getLogger()


class DAGExecutor(object):
    def __init__(self, response_op, yml_config, show_info):
        self._retry = yml_config.get('retry', 1)

        client_type = yml_config.get('client_type', 'brpc')
        use_multithread = yml_config.get('use_multithread', True)
        use_profile = yml_config.get('profile', False)
        channel_size = yml_config.get('channel_size', 0)
        self._asyn_profile = yml_config.get('asyn_profile', False)

        if show_info and use_profile:
            _LOGGER.info("================= PROFILER ================")
            if use_multithread:
                _LOGGER.info("op: thread")
            else:
                _LOGGER.info("op: process")
            if self._asyn_profile:
                _LOGGER.info("profile mode: asyn (This mode is only used"
                             " when using the process version Op)")
            else:
                _LOGGER.info("profile mode: sync")
            _LOGGER.info("-------------------------------------------")

        self.name = "@G"
        self._profiler = TimeProfiler()
        self._profiler.enable(use_profile)

        self._dag = DAG(self.name, response_op, use_profile, use_multithread,
                        client_type, channel_size, show_info)
        (in_channel, out_channel, pack_rpc_func,
         unpack_rpc_func) = self._dag.build()
        self._dag.start()

        self._set_in_channel(in_channel)
        self._set_out_channel(out_channel)
        self._pack_rpc_func = pack_rpc_func
        self._unpack_rpc_func = unpack_rpc_func

        _LOGGER.debug(self._log(in_channel.debug()))
        _LOGGER.debug(self._log(out_channel.debug()))

        self._id_lock = threading.Lock()
        self._id_counter = 0
        self._reset_max_id = 1000000000000000000
        self._cv_pool = {}
        self._cv_for_cv_pool = threading.Condition()
        self._fetch_buffer = None
        self._is_run = False
        self._recive_func = None

    def start(self):
        self._is_run = True
        self._recive_func = threading.Thread(
            target=DAGExecutor._recive_out_channel_func, args=(self, ))
        self._recive_func.start()

    def stop(self):
        self._is_run = False
        self._dag.stop()
        self._dag.join()

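    # Data ids are handed out sequentially under a lock and wrap around at
    # _reset_max_id, so the counter never grows without bound.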
    def _get_next_data_id(self):
        with self._id_lock:
            if self._id_counter >= self._reset_max_id:
                self._id_counter -= self._reset_max_id
            self._id_counter += 1
            return self._id_counter - 1

    def _set_in_channel(self, in_channel):
        if not isinstance(in_channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('in_channel must be Channel type, but got {}'.format(
                    type(in_channel))))
        in_channel.add_producer(self.name)
        self._in_channel = in_channel

    def _set_out_channel(self, out_channel):
        if not isinstance(out_channel, (ThreadChannel, ProcessChannel)):
            raise TypeError(
                self._log('out_channel must be Channel type, but got {}'.format(
                    type(out_channel))))
        out_channel.add_consumer(self.name)
        self._out_channel = out_channel

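    # Background loop started by start(): pop finished ChannelData from the
    # DAG output channel, stash it in _fetch_buffer, and notify the condition
    # variable that the calling thread registered for this data_id.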
    def _recive_out_channel_func(self):
        cv = None
        while self._is_run:
            channeldata_dict = self._out_channel.front(self.name)
            if len(channeldata_dict) != 1:
                _LOGGER.error("out_channel cannot have multiple input ops")
                os._exit(-1)
            (_, channeldata), = channeldata_dict.items()
            if not isinstance(channeldata, ChannelData):
                raise TypeError(
                    self._log('data must be ChannelData type, but got {}'.
                              format(type(channeldata))))

            data_id = channeldata.id
            _LOGGER.debug("recive thread fetch data: {}".format(data_id))
            with self._cv_for_cv_pool:
                cv = self._cv_pool[data_id]
            with cv:
                self._fetch_buffer = channeldata
                cv.notify_all()

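    # Called on the request thread: register a per-request condition variable,
    # block until the receive thread notifies it, then copy the response out
    # of _fetch_buffer and drop the entry from _cv_pool.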
    def _get_channeldata_from_fetch_buffer(self, data_id):
        resp = None
        cv = threading.Condition()
        with self._cv_for_cv_pool:
            self._cv_pool[data_id] = cv
        with cv:
            cv.wait()
            _LOGGER.debug("resp func get lock (data_id: {})".format(data_id))
            resp = copy.deepcopy(self._fetch_buffer)
            # cv.notify_all()
        with self._cv_for_cv_pool:
            self._cv_pool.pop(data_id)
        return resp

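    # Convert the RPC request into a DICT-type ChannelData; if unpacking
    # fails, return a ChannelData carrying RPC_PACKAGE_ERROR instead of
    # raising.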
    def _pack_channeldata(self, rpc_request, data_id):
        _LOGGER.debug(self._log('start inference'))
        dictdata = None
        try:
            dictdata = self._unpack_rpc_func(rpc_request)
        except Exception as e:
            return ChannelData(
                ecode=ChannelDataEcode.RPC_PACKAGE_ERROR.value,
                error_info="rpc package error: {}".format(e),
                data_id=data_id)
        else:
            return ChannelData(
                datatype=ChannelDataType.DICT.value,
                dictdata=dictdata,
                data_id=data_id)

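    # One RPC request end to end: allocate a data_id, pack the request, push
    # it into the input channel, wait for the result on the output channel,
    # and retry up to self._retry times while the ecode is not OK.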
    def call(self, rpc_request):
        data_id = self._get_next_data_id()
        if self._asyn_profile:
            self._profiler.record("call_{}#DAG-{}_0".format(data_id, data_id))
        else:
            self._profiler.record("call_{}#DAG_0".format(data_id))

        self._profiler.record("prepack_{}#{}_0".format(data_id, self.name))
        req_channeldata = self._pack_channeldata(rpc_request, data_id)
        self._profiler.record("prepack_{}#{}_1".format(data_id, self.name))

        resp_channeldata = None
        for i in range(self._retry):
            _LOGGER.debug(self._log('push data'))
            #self._profiler.record("push_{}#{}_0".format(data_id, self.name))
            self._in_channel.push(req_channeldata, self.name)
            #self._profiler.record("push_{}#{}_1".format(data_id, self.name))

            _LOGGER.debug(self._log('wait for infer'))
            #self._profiler.record("fetch_{}#{}_0".format(data_id, self.name))
            resp_channeldata = self._get_channeldata_from_fetch_buffer(data_id)
            #self._profiler.record("fetch_{}#{}_1".format(data_id, self.name))

            if resp_channeldata.ecode == ChannelDataEcode.OK.value:
                break
            if i + 1 < self._retry:
                _LOGGER.warning("retry({}): {}".format(
                    i + 1, resp_channeldata.error_info))

        self._profiler.record("postpack_{}#{}_0".format(data_id, self.name))
        rpc_resp = self._pack_for_rpc_resp(resp_channeldata)
        self._profiler.record("postpack_{}#{}_1".format(data_id, self.name))

        if self._asyn_profile:
            self._profiler.record("call_{}#DAG-{}_1".format(data_id, data_id))
        else:
            self._profiler.record("call_{}#DAG_1".format(data_id))
        self._profiler.print_profile()
        return rpc_resp

    def _pack_for_rpc_resp(self, channeldata):
        _LOGGER.debug(self._log('get channeldata'))
        return self._pack_rpc_func(channeldata)

    def _log(self, info_str):
        return "[{}] {}".format(self.name, info_str)


class DAG(object):
    def __init__(self, request_name, response_op, use_profile, use_multithread,
                 client_type, channel_size, show_info):
        self._request_name = request_name
        self._response_op = response_op
        self._use_profile = use_profile
        self._use_multithread = use_multithread
        self._channel_size = channel_size
        self._client_type = client_type
        self._show_info = show_info
        if not self._use_multithread:
            self._manager = multiprocessing.Manager()

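    # Walk backwards from the ResponseOp to collect every reachable Op,
    # together with each Op's successor list, checking that Op names are
    # globally unique along the way.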
    def get_use_ops(self, response_op):
        unique_names = set()
        used_ops = set()
        succ_ops_of_use_op = {}  # {op_name: succ_ops}
        que = Queue.Queue()
        que.put(response_op)
        while que.qsize() != 0:
            op = que.get()
            for pred_op in op.get_input_ops():
                if pred_op.name not in succ_ops_of_use_op:
                    succ_ops_of_use_op[pred_op.name] = []
                if op != response_op:
                    succ_ops_of_use_op[pred_op.name].append(op)
                if pred_op not in used_ops:
                    que.put(pred_op)
                    used_ops.add(pred_op)
                    # check the name of op is globally unique
                    if pred_op.name in unique_names:
                        raise Exception("the name of Op must be unique: {}".
                                        format(pred_op.name))
                    unique_names.add(pred_op.name)
        return used_ops, succ_ops_of_use_op

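    # Channels are ThreadChannel or ProcessChannel instances depending on
    # whether Ops run as threads or as separate processes.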
    def _gen_channel(self, name_gen):
        channel = None
        if self._use_multithread:
            channel = ThreadChannel(
                name=name_gen.next(), maxsize=self._channel_size)
        else:
            channel = ProcessChannel(
                self._manager, name=name_gen.next(), maxsize=self._channel_size)
        return channel

    def _gen_virtual_op(self, name_gen):
        return VirtualOp(name=name_gen.next())

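    # Topological sort that repeatedly peels off Ops whose remaining
    # out-degree reaches zero, starting from the single Op feeding the
    # ResponseOp. The result is a list of "views" (levels) ordered from the
    # output side; _build_dag reverses it so views run input -> output.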
    def _topo_sort(self, used_ops, response_op, out_degree_ops):
        out_degree_num = {
            name: len(ops)
            for name, ops in out_degree_ops.items()
        }
        que_idx = 0  # index of the currently active queue (ping-pong)
        ques = [Queue.Queue() for _ in range(2)]
        zero_indegree_num = 0
        for op in used_ops:
            if len(op.get_input_ops()) == 0:
                zero_indegree_num += 1
        if zero_indegree_num != 1:
            raise Exception("DAG contains multiple input Ops")
        last_op = response_op.get_input_ops()[0]
        ques[que_idx].put(last_op)

        # topo sort to get dag_views
        dag_views = []
        sorted_op_num = 0
        while True:
            que = ques[que_idx]
            next_que = ques[(que_idx + 1) % 2]
            dag_view = []
            while que.qsize() != 0:
                op = que.get()
                dag_view.append(op)
                sorted_op_num += 1
                for pred_op in op.get_input_ops():
                    out_degree_num[pred_op.name] -= 1
                    if out_degree_num[pred_op.name] == 0:
                        next_que.put(pred_op)
            dag_views.append(dag_view)
            if next_que.qsize() == 0:
                break
            que_idx = (que_idx + 1) % 2
        if sorted_op_num < len(used_ops):
            raise Exception("not legal DAG")

        return dag_views, last_op

    def _build_dag(self, response_op):
        if response_op is None:
            raise Exception("response_op has not been set.")
        used_ops, out_degree_ops = self.get_use_ops(response_op)
        if self._show_info:
            _LOGGER.info("================= USED OP =================")
            for op in used_ops:
                if op.name != self._request_name:
                    _LOGGER.info(op.name)
            _LOGGER.info("-------------------------------------------")
        if len(used_ops) <= 1:
            raise Exception(
                "Besides RequestOp and ResponseOp, there should be at least one Op in DAG."
            )

        dag_views, last_op = self._topo_sort(used_ops, response_op,
                                             out_degree_ops)
        dag_views = list(reversed(dag_views))
        if self._show_info:
            _LOGGER.info("================== DAG ====================")
            for idx, view in enumerate(dag_views):
                _LOGGER.info("(VIEW {})".format(idx))
                for op in view:
                    _LOGGER.info("  [{}]".format(op.name))
                    for out_op in out_degree_ops[op.name]:
                        _LOGGER.info("    - {}".format(out_op.name))
            _LOGGER.info("-------------------------------------------")

        # create channels and virtual ops
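        # Iterate over adjacent pairs of views: a successor that is not in the
        # very next view gets a VirtualOp inserted to relay its data across
        # views, and Ops in the same view that share identical predecessors
        # are wired to one shared input channel.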
        virtual_op_name_gen = NameGenerator("vir")
        channel_name_gen = NameGenerator("chl")
        virtual_ops = []
        channels = []
        input_channel = None
        actual_view = None
        for v_idx, view in enumerate(dag_views):
            if v_idx + 1 >= len(dag_views):
                break
            next_view = dag_views[v_idx + 1]
            if actual_view is None:
                actual_view = view
            actual_next_view = []
            pred_op_of_next_view_op = {}
            for op in actual_view:
                # find actual succ op in next view and create virtual op
                for succ_op in out_degree_ops[op.name]:
                    if succ_op in next_view:
                        if succ_op not in actual_next_view:
                            actual_next_view.append(succ_op)
                        if succ_op.name not in pred_op_of_next_view_op:
                            pred_op_of_next_view_op[succ_op.name] = []
                        pred_op_of_next_view_op[succ_op.name].append(op)
                    else:
                        # create virtual op
                        virtual_op = self._gen_virtual_op(virtual_op_name_gen)
                        virtual_ops.append(virtual_op)
                        out_degree_ops[virtual_op.name] = [succ_op]
                        actual_next_view.append(virtual_op)
                        pred_op_of_next_view_op[virtual_op.name] = [op]
                        virtual_op.add_virtual_pred_op(op)
            actual_view = actual_next_view
            # create channel
            processed_op = set()
            for o_idx, op in enumerate(actual_next_view):
                if op.name in processed_op:
                    continue
                channel = self._gen_channel(channel_name_gen)
                channels.append(channel)
                _LOGGER.debug("{} => {}".format(channel.name, op.name))
                op.add_input_channel(channel)
                pred_ops = pred_op_of_next_view_op[op.name]
                if v_idx == 0:
                    input_channel = channel
                else:
                    # if pred_op is a virtual op, its ancestors are
                    # registered as the producers of the channel
                    for pred_op in pred_ops:
                        _LOGGER.debug("{} => {}".format(pred_op.name,
                                                        channel.name))
                        pred_op.add_output_channel(channel)
                processed_op.add(op.name)
                # ops with identical predecessor sets share this input channel
                for other_op in actual_next_view[o_idx + 1:]:
                    if other_op.name in processed_op:
                        continue
                    other_pred_ops = pred_op_of_next_view_op[other_op.name]
                    if len(other_pred_ops) != len(pred_ops):
                        continue
                    same_flag = True
                    for pred_op in pred_ops:
                        if pred_op not in other_pred_ops:
                            same_flag = False
                            break
                    if same_flag:
                        _LOGGER.debug("{} => {}".format(channel.name,
                                                        other_op.name))
                        other_op.add_input_channel(channel)
                        processed_op.add(other_op.name)
        output_channel = self._gen_channel(channel_name_gen)
        channels.append(output_channel)
        last_op.add_output_channel(output_channel)

        pack_func, unpack_func = None, None
        pack_func = response_op.pack_response_package

        actual_ops = virtual_ops
        for op in used_ops:
            if len(op.get_input_ops()) == 0:
                unpack_func = op.unpack_request_package
                continue
            actual_ops.append(op)

        for c in channels:
            _LOGGER.debug(c.debug())

        return (actual_ops, channels, input_channel, output_channel, pack_func,
                unpack_func)

    def build(self):
        (actual_ops, channels, input_channel, output_channel, pack_func,
         unpack_func) = self._build_dag(self._response_op)

        self._actual_ops = actual_ops
        self._channels = channels
        self._input_channel = input_channel
        self._output_channel = output_channel
        self._pack_func = pack_func
        self._unpack_func = unpack_func

        return self._input_channel, self._output_channel, self._pack_func, self._unpack_func

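    # Launch every concrete Op (including generated VirtualOps) as either
    # threads or processes, matching the use_multithread setting.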
    def start(self):
        self._threads_or_proces = []
        for op in self._actual_ops:
            op.use_profiler(self._use_profile)
            if self._use_multithread:
                self._threads_or_proces.extend(
                    op.start_with_thread(self._client_type))
            else:
                self._threads_or_proces.extend(
                    op.start_with_process(self._client_type))
        # do not join here; the caller is responsible for calling join()
        return self._threads_or_proces

    def join(self):
        for x in self._threads_or_proces:
            x.join()

    def stop(self):
        for op in self._actual_ops:
            op.stop()
        for chl in self._channels:
            chl.stop()