# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import collections
import contextlib
import functools
import itertools
import json
import os
import typing
import warnings
import weakref

import numpy as np

from ..core._imperative_rt import GraphProfiler
from ..core._imperative_rt.ops import (
    CollectiveComm,
    GaussianRNG,
    RemoteRecv,
    RemoteSend,
    UniformRNG,
    VirtualDep,
)
from ..core._trace_option import set_symbolic_shape
from ..core._wrap import device as as_device
from ..core.ops.special import Const
from ..core.tensor import megbrain_graph as G
from ..core.tensor.core import OpBase, TensorBase, TensorWrapperBase, apply
from ..core.tensor.raw_tensor import OpDef, RawTensor, as_raw_tensor
from ..core.tensor.tensor import Tensor
from .sublinear_memory_config import SublinearMemoryConfig


def _input_node_use_static_shape():
    return os.environ.get("MEGENGINE_INPUT_NODE_USE_STATIC_SHAPE") is not None


class TraceMismatchError(RuntimeError):
    pass


active_trace = None
skip_tracing = False


def is_tracing():
    if active_trace is None:
        return False
    else:
        return not skip_tracing


@contextlib.contextmanager
def exclude_from_trace():
    global skip_tracing
    if skip_tracing:
        yield
        return
    try:
        skip_tracing = True
        if active_trace is not None:
            active_trace._begin_excluded_region()
        yield
    finally:
        skip_tracing = False
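
# A minimal usage sketch (tensor names are hypothetical): code inside the
# `with` block runs eagerly and is not recorded into the active trace.
#
#     @trace
#     def step(x):
#         y = x + 1
#         with exclude_from_trace():
#             print(y.numpy())  # debug read, executed eagerly on every call
#         return y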


class TensorInfo:
    __slots__ = (
        # collected attributes
        "external",
        "exported",
        "data_read",
        "shape_read",
        "value_read",
        "device",
        "dtype",
        "shape",
        "is_const",
        "bound_data",
        # resources for execution
        "varnode",
        "data_setter",
        "shape_reader",
        "value_reader",
        "data_reader",
    )

    def __init__(self):
        self.exported = None
        self.data_read = None
        self.shape_read = None
        self.value_read = None
        self.bound_data = None

        self.data_setter = None
        self.shape_reader = None
        self.value_reader = None
        self.data_reader = None


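# ops with cross-device/cross-process side effects; the compiled graph threads
# explicit dependency links through them to preserve their execution order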
_io_op_types = {CollectiveComm, RemoteSend, RemoteRecv}


class trace:
    """
    Wraps a callable and provides:

    * tracing via :meth:`.trace` and :meth:`.dump`
    * accelerated evaluation via :meth:`.__call__`

    :param function: the function to be traced.
    :param symbolic: whether to apply symbolic execution for tracing. Default: False
    :param capture_as_const: capture global vars or closures as const value. Default: False
    :param sublinear_memory_config: configuration for sublinear memory optimization.
        If not None, it enables sublinear memory optimization with given setting.
    :param profiling: whether to profile compiled trace. Default: False
    :param opt_level: optimization level for compiling trace.
    :param symbolic_shape: whether to use symbolic shape for tracing. Default: True
    """

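    # A minimal usage sketch (model/tensor names are hypothetical): the first
    # call records and compiles the computation, later calls replay it.
    #
    #     @trace(symbolic=True)
    #     def step(data):
    #         return model(data)
    #
    #     out = step(inp)  # traced and compiled on the first call
    #     out = step(inp)  # replays the compiled graph
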
    def __new__(cls, *args, **kwargs):
        if not args:
            return functools.partial(cls, **kwargs)
        return super().__new__(cls)

    def __init__(
        self,
        function,
        symbolic=False,
        capture_as_const=False,
        sublinear_memory_config: SublinearMemoryConfig = None,
        profiling: bool = False,
        opt_level: int = None,
        symbolic_shape: bool = True,
    ):
        self.__wrapped__ = function
        self._symbolic = symbolic
        self._capture_as_const = capture_as_const
        self._sublinear_memory_config = sublinear_memory_config
        self._profiling = profiling
        self._profiler = None
        self._graph_opt_level = opt_level
        self._symbolic_shape = symbolic_shape

        self._reset()

    def _reset(self):
        self._untraced = True
        self._tinfo = []  # handle -> TensorInfo
        self._seq = []
        self._pc = 0
        self._graph = None
        self._need_reset_nodes = None
        self._lazy_eval_graph = None
        self._lazy_eval_tensors = weakref.WeakSet()
        self._lazy_eval_links = None
        self._active_tensors = weakref.WeakSet()
        self._tensor_remaps = None
        self._inputs_to_restore = None
        self._arg_bindings = None
        self._kwarg_bindings = None
        self._output_bindings = None
        self._output_names = None

    def _new_handle(self):
        handle = len(self._tinfo)
        info = TensorInfo()
        self._tinfo.append(info)
        return handle, info

    def _apply_op(self, op, args):
        assert not self._untraced
        # check against trace
        if self._pc >= len(self._seq):
            raise TraceMismatchError("trace should end here, but more op observed")
        record = self._seq[self._pc]
        op_, ihandles, ohandles = record
        if op != op_:
            raise TraceMismatchError("op different from last time")
        if len(ihandles) != len(args):
            raise TraceMismatchError("op input size different from last time")

        for h, x in zip(ihandles, args):
            info = self._tinfo[h]
            if info.external:
                if (
                    x.__class__ is CompiledTensorProxy
                    and not self._tinfo[x._CompiledTensorProxy__handle].exported
                ):
                    raise TraceMismatchError(
                        "failed to capture: input was an external tensor "
                        "last time, got an internal tensor this time"
                    )
                if info.bound_data:
                    if x.__class__ is CompiledTensorProxy:
                        raise TraceMismatchError(
                            "const capture violated: was an external tensor "
                            "last time, got an internal tensor this time"
                        )
                    if x._handle != info.bound_data._handle:
                        if not np.array_equal(x.numpy(), info.bound_data.numpy()):
                            raise TraceMismatchError(
                                "const capture violated: got "
                                "a different tensor this time"
                            )
                else:
                    if info.dtype != x.dtype:
                        raise TraceMismatchError(
                            "failed to capture: different dtype from last time"
                        )
                    if info.device != x.device:
                        raise TraceMismatchError(
                            "failed to capture: different device from last time"
                        )
                    info.data_setter.set_value(x._dev_tensor())
            else:
                if x.__class__ is not CompiledTensorProxy:
                    if x not in self._tensor_remaps:
                        raise TraceMismatchError(
                            "unexpected capture: trying to use an external tensor as "
                            "input, but that input was an internal tensor last time"
                        )
                    else:
                        x = self._tensor_remaps[x]
                if x._CompiledTensorProxy__handle != h:
                    raise TraceMismatchError(
                        "mis-wiring: input edge to a data flow "
                        "graph node is different from last time"
                    )

        self._pc += 1
        outputs = tuple([CompiledTensorProxy(h) for h in ohandles])
        self._active_tensors.update(outputs)
        return outputs

    def _apply_const(self, op, args):
        assert not self._untraced
        # check against trace
        if self._pc >= len(self._seq):
            raise TraceMismatchError("trace should end here, but more op observed")
        record = self._seq[self._pc]
        op_, ihandles, ohandles = record
        assert isinstance(op_, Const)

        eq = op_.value == op.value
        if not isinstance(eq, bool):
            eq = all(eq)
        if not eq:
            raise TraceMismatchError(
                "const tensor violated: got a different tensor this time"
            )

        self._pc += 1
        (h,) = ohandles
        outputs = tuple([self._tinfo[h].bound_data])
        return outputs

    def _record_op(self, op, inputs, outputs):
        if skip_tracing:
            for x in inputs:
                h = getattr(x, "_TraceMixin__handle", None)
                if h is not None:
                    self._tinfo[h].data_read = True
            return

        ihandles = []
        for x in inputs:
            h = getattr(x, "_TraceMixin__handle", None)
            if h is None or (not self._capture_as_const and self._tinfo[h].exported):
                h, info = self._new_handle()
                info.external = True
                info.device = x.device
                info.dtype = x.dtype
                info.shape = x.shape
                if self._capture_as_const:
                    info.bound_data = x

            ihandles.append(h)

        ohandles = []
        for x in outputs:
            h, info = self._new_handle()
            ohandles.append(h)
            info.external = False
            TraceMixin._TraceMixin__inject(x, h)

        self._seq.append((op, tuple(ihandles), tuple(ohandles)))
        self._active_tensors.update(outputs)

    def _record_const(self, op, outputs):
        if skip_tracing:
            (x,) = outputs
            h = getattr(x, "_TraceMixin__handle", None)
            if h is not None:
                self._tinfo[h].data_read = True
            return

        (x,) = outputs
        h, info = self._new_handle()
        ohandles = [h]
        info.external = True
        info.device = x.device
        info.dtype = x.dtype
        info.shape = x.shape
        info.bound_data = x
        info.is_const = True
        TraceMixin._TraceMixin__inject(x, h)
        self._seq.append((op, tuple(), tuple(ohandles)))

    def _set_active(self, active: bool):
        global active_trace
        if active:
            if active_trace:
                raise NotImplementedError("sorry, not implemented: nested trace")
            active_trace = self
        else:
            assert active_trace is self
            active_trace = None

    def _init_trace(self, symbolic: bool):
        apply.enable(apply_with_tracing)
        apply.enable(apply_const_with_tracing)
        if symbolic:
            apply.enable(apply_symbolic_mode)
            apply.enable(apply_const_symbolic_mode)
            self._lazy_eval_graph = G.Graph()
            self._apply_graph_options(self._lazy_eval_graph)
            self._lazy_eval_links = ()

    def _take_escaped_tensors(self):
        escaped_tensors = tuple(self._active_tensors)
        self._active_tensors.clear()
        return escaped_tensors

    def _lazy_eval(self, lazy_eval_graph, lazy_eval_tensors, lazy_eval_links):
        readers = [
            G.OutputNode(x._LazyEvalTensor__varnode).outputs[0]
            for x in lazy_eval_tensors
        ]
        self._apply_graph_options(lazy_eval_graph)
        # FIXME
        if self._graph_opt_level is not None:
            lazy_eval_graph.options.graph_opt_level = self._graph_opt_level
        else:
            lazy_eval_graph.options.graph_opt_level = 2
        lazy_eval_graph.compile(*lazy_eval_links, *readers)
        lazy_eval_graph()
        for r, x in zip(readers, lazy_eval_tensors):
            assign_raw_tensor(x, as_raw_tensor(r.op.get_value()))

    @contextlib.contextmanager
    def _setup(self):
        interrupted = False

        def do_enter():
            self._save_symbolic_shape = set_symbolic_shape(self._symbolic_shape)
            self._set_active(True)
            if self._untraced:
                self._init_trace(self._symbolic)
            else:
                apply.enable(apply_compiled_mode)
                apply.enable(apply_const_compiled_mode)
                if self._graph is None:
                    self._compile()
                self._graph.execute()

        def do_finalize():
            escaped_tensors = self._take_escaped_tensors()
            if self._untraced:
                for x in escaped_tensors:
                    info = self._tinfo[x._TraceMixin__handle]
                    info.data_read = True
                    x._TraceMixin__restore()
                if self._inputs_to_restore:
                    for x in self._inputs_to_restore:
                        x._TraceMixin__restore()
                if self._symbolic and (
                    self._lazy_eval_tensors or self._lazy_eval_links
                ):
                    # eval lazy eval tensors
                    self._lazy_eval(
                        self._lazy_eval_graph,
                        tuple(self._lazy_eval_tensors),
                        self._lazy_eval_links,
                    )
                    self._lazy_eval_graph = None
                    self._lazy_eval_tensors = None
                    self._lazy_eval_links = None
                self._untraced = False
            else:
                # compiled_tensor leaks
                if self._pc == len(self._seq):
                    for x in escaped_tensors:
                        try:
                            assign_raw_tensor(x, as_raw_tensor(x._dev_tensor()))
                        except TraceMismatchError:
                            # TraceMismatchError thrown in do_exit
                            pass
                    self._graph.wait()
                    self._reset_exec_env()

            # reset status
            self._pc = 0
            self._tensor_remaps = None
            apply.disable(apply_with_tracing)
            apply.disable(apply_const_with_tracing)
            apply.disable(apply_symbolic_mode)
            apply.disable(apply_const_symbolic_mode)
            apply.disable(apply_compiled_mode)
            apply.disable(apply_const_compiled_mode)
            self._set_active(False)
            # Restore global variable
            set_symbolic_shape(self._save_symbolic_shape)

        def do_exit():
            if not self._untraced and self._pc != len(self._seq):
                raise TraceMismatchError("premature end")
            if not self._symbolic or not self._untraced:
                for x in self._active_tensors:
                    x._dev_tensor()

        try:
            do_enter()
            yield
            do_exit()
        except:
            interrupted = True
            raise
        finally:
            do_finalize()
            if interrupted:
                self._reset()

    def _begin_excluded_region(self):
        if self._capture_as_const:
            raise RuntimeError(
                "exclude_from_trace cannot be used with capture_as_const"
            )
        if self._untraced:
            # conditionally reading a compiled tensor in excluded region
            # is permitted, so we have to assume every tensor might be read
            for x in self._active_tensors:
                info = self._tinfo[x._TraceMixin__handle]
                info.exported = True
                info.data_read = True

    def _apply_graph_options(self, graph):

        graph.options.no_force_inplace = True
        graph.options.seq_opt.enable_seq_comp_node_opt = False
        # graph opt level
        # if self._graph_opt_level is not None:
        #     graph.options.graph_opt_level = self._graph_opt_level
        # FIXME
        graph.options.graph_opt_level = 0
        # sublinear
        if self._sublinear_memory_config is not None:
            graph.options.enable_sublinear_memory_opt = True
            sublinear_config = graph.options.sublinear_mem_config
            sublinear_config.lb_memory = self._sublinear_memory_config.lb_memory
            sublinear_config.genetic_nr_iter = (
                self._sublinear_memory_config.genetic_nr_iter
            )
            sublinear_config.genetic_pool_size = (
                self._sublinear_memory_config.genetic_pool_size
            )
            sublinear_config.thresh_nr_try = self._sublinear_memory_config.thresh_nr_try
            sublinear_config.num_worker = self._sublinear_memory_config.num_worker
        # profile
        if self._profiling:
            self._profiler = GraphProfiler(graph)

    def _compile(self):
        graph = self._graph = G.Graph()
        graph.options.async_exec_level = 0b100
        self._apply_graph_options(graph)
        # graph.options.graph_opt_level = 0
        need_reset_nodes = self._need_reset_nodes = []
        # links enforce ordering of I/O nodes
        links = ()
        readers = []

        if self._capture_as_const:
            for h in itertools.chain(self._arg_bindings, self._kwarg_bindings.values()):
                info = self._tinfo[h]
                opnode = info.data_setter = G.InputNode(
                    device=info.device,
                    dtype=info.dtype,
                    shape=info.shape or (1,),
                    graph=graph,
                    use_static_shape=_input_node_use_static_shape(),
                )
                need_reset_nodes.append(opnode)
                info.varnode = opnode.outputs[0]
                links += opnode.outputs[1:]

        for op, ihandles, ohandles in self._seq:
            if isinstance(op, Const):
                assert len(ihandles) == 0
                (h,) = ohandles
                info = self._tinfo[h]
                if not hasattr(info, "varnode"):
                    assert info.external
                    assert info.bound_data
                    info.varnode = graph.make_const(
                        info.bound_data.numpy(),
                        info.bound_data.dtype,
                        info.bound_data.device,
                    )
                continue

            require_links = type(op) in _io_op_types
            ivars = []
            for i, h in enumerate(ihandles):
                info = self._tinfo[h]
                if not hasattr(info, "varnode"):
                    assert info.external
                    if info.bound_data:
                        if hasattr(info, "is_const") and info.is_const:
                            info.varnode = graph.make_const(
                                info.bound_data.numpy(),
                                info.bound_data.dtype,
                                info.bound_data.device,
                            )
                        else:
                            info.varnode = graph.make_const(
                                info.bound_data._dev_tensor()
                                # info.bound_data.numpy()
                            )
                    else:
                        opnode = info.data_setter = G.InputNode(
                            *links,
                            device=info.device,
                            dtype=info.dtype,
                            shape=info.shape or (1,),
                            graph=graph,
                            use_static_shape=_input_node_use_static_shape(),
                        )
                        need_reset_nodes.append(opnode)
                        info.varnode, *links = opnode.outputs
                if require_links and i == 0 and len(links) > 0:
                    info.varnode = apply(VirtualDep(), info.varnode, *links)[0]
                    links = (info.varnode,)

                ivars.append(info.varnode)
            ovars = apply(op, *ivars)
            if require_links and len(ovars) > 0:
                links = (ovars[0],)
            assert len(ovars) == len(ohandles)
            for h, v in zip(ohandles, ovars):
                info = self._tinfo[h]
                info.varnode = v

                def add_reader(opnode):
                    nonlocal links
                    need_reset_nodes.append(opnode)
                    readers.append(opnode.outputs[0])
                    links = opnode.outputs

                if info.data_read:
                    # Shape can be obtained from data so doesn't need its own
                    # output node. On the other hand, value is read separately
                    # to leverage eager h2d copy
                    info.shape_read = False
                    opnode = info.data_reader = G.OutputNode(v, *links)
                    add_reader(opnode)
                if info.value_read:
                    opnode = info.value_reader = G.ValueOutputNode(v, *links)
                    add_reader(opnode)
                if info.shape_read:
                    opnode = info.shape_reader = G.AttrOutputNode(v, *links)
                    add_reader(opnode)
        # FIXME
        if self._graph_opt_level is not None:
            graph.options.graph_opt_level = self._graph_opt_level
        else:
            graph.options.graph_opt_level = 2
        graph.compile(*readers, *links)

    def _reset_exec_env(self):
        for opnode in self._need_reset_nodes:
            opnode.reset()

    def _require_shape(self, handle):
        info = self._tinfo[handle]
        info.shape_read = True

    def _require_value(self, handle):
        info = self._tinfo[handle]
        info.value_read = True

    def _require_data(self, handle):
        info = self._tinfo[handle]
        info.data_read = True

    def __call__(self, *args, **kwargs):
        if is_tracing():
            return self.__wrapped__(*args, **kwargs)
        with self._setup():
            if self._capture_as_const:
                self._process_inputs(*args, **kwargs)
            outputs = self.__wrapped__(*args, **kwargs)
            if self._capture_as_const:
                self._process_outputs(outputs)
            return outputs

    def dump(
        self,
        file,
        *,
        arg_names=None,
        output_names=None,
        append=False,
        optimize_for_inference=True,
        **kwargs
    ):
        r"""
        Serializes trace to file system.

        :param file: output file, could be file object or filename.
        :param arg_names: names of the input tensors in the traced function.
        :param output_names: names of the output tensors in the traced function,
            use the default name if not specified.
        :param append: whether output is appended to ``file``.
            Only works when ``file`` is str.
        :param optimize_for_inference: enable optimizations,
            will skip all optimize options if this is False. Default: True

        :Keyword Arguments:

            * enable_io16xc32 --
                whether to use float16 for I/O between oprs and use
                float32 as internal computation precision. Note the output var would be
                changed to float16.
            * enable_ioc16 --
                whether to use float16 for both I/O and computation
                precision.

            * enable_hwcd4 --
                whether to use NHWCD4 data layout. This is faster on some
                OpenCL backends.
            * enable_nchw88 --
                whether to use NCHW88 data layout, currently
                used in X86 AVX backend.
            * enable_nchw44 --
                whether to use NCHW44 data layout, currently
                used in arm backend.
            * enable_nchw44_dot --
                whether to use NCHW44_dot data layout, currently
                used in armv8.2+dotprod backend.
            * enable_nchw4 --
                whether to use NCHW4 data layout, currently
                used in nvidia backend (based on cudnn).
            * enable_nchw32 --
                whether to use NCHW32 data layout, currently
                used in nvidia backend with tensorcore (based on cudnn).
            * enable_chwn4 --
                whether to use CHWN4 data layout, currently
                used in nvidia backend with tensorcore.

            * enable_fuse_conv_bias_nonlinearity: whether to fuse conv+bias+nonlinearity
                into one opr.
            * enable_fuse_conv_bias_with_z: whether to fuse conv_bias with z
                input for inference on nvidia backend (this optimization pass will
                result in mismatch of the precision of output of training and
                inference)
        """
        if not self._capture_as_const:
            raise ValueError(
                "you must specify capture_as_const=True at __init__ to use dump"
            )
        if self._untraced:
            raise RuntimeError("should run at least once before calling dump")
        if self._output_names and output_names:
            raise TypeError(
                "cannot specify output_names when output is already in dict format"
            )
        if output_names and not isinstance(output_names, collections.abc.Sequence):
            output_names = (output_names,)
        if output_names and len(output_names) != len(self._output_bindings):
            raise ValueError(
                "wrong number of output_names, should be {} values".format(
                    len(self._output_bindings)
                )
            )
        if arg_names is None:
            arg_names = ["arg_%d" % i for i in range(len(self._arg_bindings))]
        if arg_names and not isinstance(arg_names, collections.abc.Sequence):
            arg_names = (arg_names,)
        if arg_names and len(arg_names) != len(self._arg_bindings):
            raise ValueError(
                "wrong number of arg_names, should be {} values".format(
                    len(self._arg_bindings)
                )
            )
        output_names = output_names or self._output_names

        dumped_device = as_device("xpux")

        h2v = {}
        graph = G.Graph()
        # only graph_opt_level takes effect in dump
        self._apply_graph_options(graph)

        for i, h in enumerate(self._arg_bindings):
            info = self._tinfo[h]
            h2v[h] = graph.make_h2d(
                dtype=info.dtype,
                device=dumped_device,
                shape=info.shape or (1,),
                name=arg_names[i] if arg_names else None,
            )
        for k, h in self._kwarg_bindings.items():
            info = self._tinfo[h]
            h2v[h] = graph.make_h2d(
                dtype=info.dtype, device=dumped_device, shape=info.shape or (1,), name=k
            )

        for op, ihandles, ohandles in self._seq:
            if isinstance(op, Const):
                assert len(ihandles) == 0
                (h,) = ohandles
                info = self._tinfo[h]
                if h not in h2v:
                    assert info.external
                    assert info.bound_data
                    h2v[h] = graph.make_const(
                        info.bound_data.numpy(), dtype=info.dtype, device=info.device,
                    )
                continue
            ivars = []
            for h in ihandles:
                info = self._tinfo[h]
                if h not in h2v:
                    assert info.external
                    assert info.bound_data
                    h2v[h] = graph.make_const(
                        info.bound_data.numpy(), dtype=info.dtype, device=dumped_device
                    )
                ivars.append(h2v[h])
            ovars = apply(op, *ivars)
            assert len(ovars) == len(ohandles)
            h2v.update(zip(ohandles, ovars))

        dest_vars = []
        for i, h in enumerate(self._output_bindings):
            v = h2v[h]
            if output_names:
                v.name = output_names[i]
            dest_vars.append(v)

        if optimize_for_inference:
            dest_vars = G.optimize_for_inference(dest_vars, **kwargs)

        if isinstance(file, str):
            permission = "ab" if append else "wb"
            file = open(file, permission)
        dump_content, dump_info = G.dump_graph(dest_vars)
        file.write(dump_content)
        return dump_info

    def _process_inputs(self, *args, **kwargs):
        if self._untraced:
            self._inputs_to_restore = []

            def record_input(x):
                if x is None:
                    return
                h, info = self._new_handle()
                info.external = False
                info.device = x.device
                info.dtype = x.dtype
                info.shape = x.shape
                TraceMixin._TraceMixin__inject(x, h)
                self._inputs_to_restore.append(x)
                return h

            self._arg_bindings = []
            for i, x in enumerate(args):
                x = find_raw_tensor(x)
                if x is None:
                    raise TypeError(
                        "positional arguments should all be tensor "
                        "but args[%d] cannot be recognized as one" % i
                    )
                self._arg_bindings.append(record_input(x))

            self._kwarg_bindings = {}
            for k, x in kwargs.items():
                x = find_raw_tensor(x)
                if x is not None:
                    self._kwarg_bindings[k] = record_input(x)
        else:
            if len(args) != len(self._arg_bindings):
                raise TraceMismatchError("positional argument length mismatch")

            self._tensor_remaps = {}

            for i, (h, x) in enumerate(zip(self._arg_bindings, args)):
                x = find_raw_tensor(x)
                if x is None:
                    raise TypeError(
                        "positional arguments should all be tensor "
                        "but args[%d] cannot be recognized as one" % i
                    )
                info = self._tinfo[h]
                if x.dtype != info.dtype:
                    raise TypeError("args[%d].dtype different from last time" % i)
                if x.device != info.device:
                    raise TypeError("args[%d].device different from last time" % i)
                info.data_setter.set_value(x._dev_tensor())
                self._tensor_remaps[x] = CompiledTensorProxy(h)

            kwargs_tensors = {}
            for k, x in kwargs.items():
                x = find_raw_tensor(x)
                if x is not None:
                    kwargs_tensors[k] = x
            if set(kwargs_tensors) != set(self._kwarg_bindings):
                too_many = set(kwargs_tensors) - set(self._kwarg_bindings)
                too_few = set(self._kwarg_bindings) - set(kwargs_tensors)
                if too_many:
                    raise TraceMismatchError(
                        "keyword arguments found to be tensor this time "
                        "but were non-tensor previously: %s" % " ".join(too_many)
                    )
                if too_few:
                    raise TraceMismatchError(
                        "keyword arguments found to be non-tensor this time "
                        "but were tensor previously: %s" % " ".join(too_few)
                    )
            for k, h in self._kwarg_bindings.items():
                x = kwargs_tensors[k]
                info = self._tinfo[h]
                if x.dtype != info.dtype:
                    raise TypeError("kwargs[%s].dtype different from last time" % k)
                if x.device != info.device:
                    raise TypeError("kwargs[%s].device different from last time" % k)
                info.data_setter.set_value(x._dev_tensor())
                self._tensor_remaps[x] = CompiledTensorProxy(h)

    def _process_outputs(self, outputs):
        output_names = None
        if isinstance(outputs, collections.abc.Mapping):
            output_names, outputs = zip(*sorted(outputs.items()))
        elif not isinstance(outputs, collections.abc.Sequence):
            outputs = (outputs,)

        if not self._untraced:
            if output_names != self._output_names:
                too_many = set(output_names) - set(self._output_names)
                too_few = set(self._output_names) - set(output_names)
                if too_many:
                    raise TraceMismatchError(
                        "output has more keys than last time: %s" % " ".join(too_many)
                    )
                if too_few:
                    raise TraceMismatchError(
                        "output has less keys than last time: %s" % " ".join(too_few)
                    )
            if len(outputs) != len(self._output_bindings):
                raise TraceMismatchError("output size differs from last time")
        else:
            self._output_names = output_names
            self._output_bindings = []

        for i, x in enumerate(outputs):
            x = find_raw_tensor(x)
            if x is None:
                raise TypeError("every item of return value should be tensor")
            if self._untraced:
                if not isinstance(x, TraceMixin):
                    raise RuntimeError("output is not computed from inputs")
                h = x._TraceMixin__handle
                self._output_bindings.append(h)
            else:
                if not isinstance(x, CompiledTensorProxy):
                    raise RuntimeError("output is not computed from inputs")
                h = x._CompiledTensorProxy__handle
                if h != self._output_bindings[i]:
                    raise TraceMismatchError(
                        "retval[%s] is a different tensor than last time"
                        % (output_names and output_names[i] or i)
                    )

    def get_profile(self):
        """
        Get profiling result for compiled trace.

        :return: a json compatible object.
        """
        if not self._profiler:
            raise RuntimeError("trace is not set with profiling=True")
        return json.loads(self._profiler.get())
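
    # A minimal usage sketch (file name is hypothetical); profiling must be
    # requested at construction time:
    #
    #     f = trace(fun, profiling=True)
    #     f(inp)
    #     with open("profile.json", "w") as fout:
    #         json.dump(f.get_profile(), fout)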

    def trace(self, *args, **kwargs):
        raise NotImplementedError(
            "trace is deemed unbeneficial with the new "
            "tracing mechanism. You should alwasy use __call__."
        )


class CompiledTensorProxy(RawTensor):
    """
    Duck-typed RawTensor
    """

    def __init__(self, handle):
        self.__handle = handle
        self._isscalar = False
        self.__info = active_trace._tinfo[handle]
        self.__shape = None
        self.__data = None
        self.__value = None

    @property
    def dtype(self):
        return self.__info.varnode.dtype

    @property
    def device(self):
        return self.__info.varnode.device

    @property
    def shape(self):
        if self._isscalar:
            return ()
        if self.__shape is None:
            if self.__info.shape_read:
                self.__shape = self.__info.shape_reader.get_value().shape
            elif self.__info.data_read:
                self.__shape = self._dev_tensor().shape
            else:
                raise TraceMismatchError("shape of this tensor is not read in trace")
        return self.__shape

    def numpy(self):
        if self.__value is None:
            if self.__info.value_read:
                self.__value = self.__info.value_reader.get_value()
            elif self.__info.data_read:
                self.__value = self._dev_tensor().numpy()
            else:
                raise TraceMismatchError("value of this tensor is not read in trace")
            if self._isscalar:
                self.__value = self.__value.squeeze()
        return self.__value

    def _dev_tensor(self):
        if self.__data is None:
            if not self.__info.data_read:
                raise TraceMismatchError("raw data of this tensor is not read in trace")
            self.__data = self.__info.data_reader.get_value()
        return self.__data

    def _drop(self):
        return

    def _swap_in(self):
        return

    def _swap_out(self):
        return

    def __del__(self):
        if self.__info.shape_read and self.__shape is not None:
            self.__info.shape_reader.drop_value()
        if self.__info.value_read and self.__value is not None:
            self.__info.value_reader.drop_value()
        if self.__info.data_read and self.__data is not None:
            self.__info.data_reader.drop_value()


class LazyEvalTensor(RawTensor):
    def __init__(self, varnode, isscalar=False):
        super().__init__()
        self.__varnode = varnode
        self._isscalar = isscalar

    @property
    def dtype(self):
        return self.__varnode.dtype

    @property
    def device(self):
        return self.__varnode.device

    @property
    def shape(self):
        if self._isscalar:
            return ()
        return self.__varnode.shape

    def numpy(self):
        ret = self.__varnode.value
        if self._isscalar:
            ret = ret.squeeze()
        return ret

    def _drop(self):
        return

    def _swap_in(self):
        return

    def _swap_out(self):
        return

    def _dev_tensor(self):
        raise RuntimeError("cannot access data during symbolic tracing")


class TraceMixin:
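    # __inject dynamically splices this mixin into a live tensor: it builds
    # (and caches) a subclass combining TraceMixin with the tensor's own class
    # and swaps the instance's __class__, so shape/value/data reads can be
    # intercepted and recorded against the trace; __restore undoes the swap.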
    __subclass_cache = {}

    def __inject(self, handle):
        cache = __class__.__subclass_cache
        cls = self.__class__
        subcls = cache.get(cls)
        if subcls is None:
            subcls = cache[cls] = type("Traced" + cls.__name__, (__class__, cls), {})
        self.__class__ = subcls
        self.__handle = handle
        self.__cls = cls
        return self

    def __restore(self):
        cls = self.__cls
        del self.__handle
        del self.__cls
        self.__class__ = cls
        return self

    @property
    def shape(self):
        if not skip_tracing:
            active_trace._require_shape(self.__handle)
        return super().shape

    def numpy(self):
        if not skip_tracing:
            active_trace._require_value(self.__handle)
        return super().numpy()

    def _dev_tensor(self):
        if not skip_tracing:
            active_trace._require_data(self.__handle)
        return super()._dev_tensor()

    def _drop(self):
        return

    def _swap_in(self):
        return

    def _swap_out(self):
        return


class TracedRawTensor(TraceMixin, RawTensor):
    pass


class TracedLazyTensor(TraceMixin, LazyEvalTensor):
    pass


def assign_raw_tensor(lhs, rhs):
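    # rebind lhs in place: clear both instances' state, then re-init lhs as a
    # plain RawTensor over rhs's handle so existing references to lhs observe
    # the evaluated data (lhs's isscalar flag is preserved)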
    handle = rhs._handle
    # Keep isscalar of lhs
    isscalar = lhs._isscalar
    rhs.__dict__.clear()
    lhs.__dict__.clear()
    lhs.__class__ = RawTensor
    lhs.__init__(handle, isscalar=isscalar)


# this hook turns RawTensor into LazyEvalTensor
@apply.register()
def apply_symbolic_mode(op: OpDef, *args: RawTensor):
    graph = active_trace._lazy_eval_graph
    ivars = []
    for x in args:
        var = getattr(x, "_LazyEvalTensor__varnode", None)
        if var:
            ivars.append(var)
        else:
            data_setter = G.InputNode(
                device=x.device,
                dtype=x.dtype,
                shape=x.shape or (1,),
                graph=graph,
                use_static_shape=True,
            )
            var = data_setter.outputs[0]
            ivars.append(var)
            data_setter.set_value(x._dev_tensor())

    require_links = type(op) in _io_op_types

    if require_links and active_trace._lazy_eval_links:
        assert len(ivars) > 0, "op should have at least one input"
        ivars[0] = apply(VirtualDep(), ivars[0], *active_trace._lazy_eval_links)[0]
        active_trace._lazy_eval_links = (ivars[0],)

    ovars = apply(op, *ivars)

    if require_links:
        active_trace._lazy_eval_links = (ovars[0],)

    outputs = [LazyEvalTensor(v) for v in ovars]
    active_trace._lazy_eval_tensors.update(outputs)
    return outputs


apply.disable(apply_symbolic_mode)


@apply.register()
def apply_const_symbolic_mode(op: Const, *args: RawTensor):
    graph = active_trace._lazy_eval_graph
    ret = LazyEvalTensor(
        graph.make_const(op.value, dtype=op.dtype, device=op.device), isscalar=True
    )
    active_trace._lazy_eval_tensors.add(ret)
    return (ret,)


apply.disable(apply_const_symbolic_mode)


@apply.register()
def apply_compiled_mode(op: OpDef, *args: RawTensor):
    if skip_tracing:
        args = [
            as_raw_tensor(x._dev_tensor()) if x.__class__ is CompiledTensorProxy else x
            for x in args
        ]
        return apply.super(op, *args)
    return active_trace._apply_op(op, args)


apply.disable(apply_compiled_mode)


@apply.register()
def apply_const_compiled_mode(op: Const, *args: RawTensor):
    if skip_tracing:
        args = [
            as_raw_tensor(x._dev_tensor()) if x.__class__ is CompiledTensorProxy else x
            for x in args
        ]
        return apply.super(op, *args)
    return active_trace._apply_const(op, args)


apply.disable(apply_const_compiled_mode)


# this hook injects TraceMixin
@apply.register()
def apply_with_tracing(op: OpDef, *args: RawTensor):
    outputs = apply.super(op, *args)
    active_trace._record_op(op, args, outputs)
    return outputs


apply.disable(apply_with_tracing)


@apply.register()
def apply_const_with_tracing(op: Const, *args: RawTensor):
    outputs = apply.super(op, *args)
    active_trace._record_const(op, outputs)
    return outputs


apply.disable(apply_const_with_tracing)


class BrokenRawTensor(RawTensor):
    def __getattribute__(self, _):
        raise RuntimeError("broken due to misuse of tracing")

    def __setattr__(self, *_):
        raise RuntimeError("broken due to misuse of tracing")


@functools.singledispatch
def find_raw_tensor(x):
    return None


@find_raw_tensor.register(RawTensor)
def _(x):
    return x


@find_raw_tensor.register(TensorWrapperBase)
def _(x):
    x = getattr(x, "__wrapped__", None)
    if x is not None:
        return find_raw_tensor(x)


@find_raw_tensor.register(Tensor)
def _(x):
    x = getattr(x, "_data", None)
    if x is not None:
        return find_raw_tensor(x)