/**
 * \file imperative/python/src/tensor.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "megbrain/dtype.h"
#include "megbrain/common.h"
#include "megbrain/imperative/ops/utility.h"

#include "./tensor.h"
#include "./grad.h"
#include "./trace.h"
#include "./common.h"
#include "./numpy_dtypes.h"
#include "./graph_rt.h"
#include "./helper.h"

#include <pybind11/numpy.h>
#include <pybind11/operators.h>
#include <range/v3/all.hpp>
#include <string>

#include <unordered_map>

namespace py = pybind11;
namespace views = ranges::views;

namespace mgb::imperative::python {

interpreter::Interpreter::Channel* interpreter_for_py;

PyObject *cpp_apply_with_tracing, *cpp_apply_const_with_tracing,
           *cpp_apply_compiled_mode, *cpp_apply_const_compiled_mode;

PyObject *cpp_apply_backward_varnode;


#define REGISTE_APPLY_FUNC(mode)                                    \
        void set_##mode(py::object pyf) {                           \
            mode = pyf.ptr();                                       \
        }

REGISTE_APPLY_FUNC(cpp_apply_with_tracing)
REGISTE_APPLY_FUNC(cpp_apply_const_with_tracing)
REGISTE_APPLY_FUNC(cpp_apply_compiled_mode)
REGISTE_APPLY_FUNC(cpp_apply_const_compiled_mode)
REGISTE_APPLY_FUNC(cpp_apply_backward_varnode)

#undef REGISTE_APPLY_FUNC
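
// For reference, each REGISTE_APPLY_FUNC(mode) invocation above expands to a
// small setter; e.g. REGISTE_APPLY_FUNC(cpp_apply_with_tracing) becomes:
//
//     void set_cpp_apply_with_tracing(py::object pyf) {
//         cpp_apply_with_tracing = pyf.ptr();
//     }
//
// Only the raw PyObject* is stored (no reference is taken), so the Python side
// is expected to keep the registered callback alive.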

bool is_tracing = false;
bool is_compiled = false;

#define SET_UNSET_PROP(mode)    \
    void set_##mode() {         \
        is_##mode = true;       \
    }                           \
    void unset_##mode() {       \
        is_##mode = false;      \
    }

SET_UNSET_PROP(tracing)
SET_UNSET_PROP(compiled)

#undef SET_UNSET_PROP
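
// For reference, SET_UNSET_PROP(tracing) expands to:
//
//     void set_tracing() { is_tracing = true; }
//     void unset_tracing() { is_tracing = false; }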

bool skip_tracing = false;

Tensor::flags_t ApplyContext::global_disable = 0;

apply_result_t apply(ApplyContext& ctx) {
    // Scalar emulation should live in each specific op's apply (e.g.
    // elementwise, reduce, typecvt). It is currently still handled on the
    // Python side and could be moved to C++ if it proves to matter for
    // performance.
    auto flags = ctx.flags & ~ApplyContext::global_disable;

    if (flags & Tensor::Flags::SCALAR) {
        // TODO: emulate scalar
    }

    if (flags & Tensor::Flags::GRAD) {
        return apply_grad(ctx);
    }

    if (auto* op = ctx.op->try_cast_final<GenericPyOp>()) {
        py::tuple pyin(ctx.nargs);
        for (size_t i = 0; i < ctx.nargs; ++i) {
            pyin[i] = TensorWrapper::make(ctx.pytype, ctx.args[i]->shared_from_this());
        }
        auto f = py::getattr(op->obj, "_default_rule");
        auto pyout = py::reinterpret_steal<py::object>(PyObject_Call(f.ptr(), pyin.ptr(), nullptr));
        if (!pyout) throw py::error_already_set();
        if (auto* tw = TensorWrapper::try_cast(pyout.ptr())) {
            return {tw->m_tensor};
        }
        apply_result_t ret;
        ret.reserve(py::len(pyout));
        for (auto&& i : pyout) {
            auto* tw = TensorWrapper::try_cast(i.ptr());
            mgb_assert(tw);
            ret.push_back(tw->m_tensor);
        }
        return ret;
    }

    if (flags & Tensor::Flags::TRACE) {
        return apply_trace(ctx);
    } else {
        SmallVector<interpreter::Interpreter::Handle> handles(ctx.nargs);
        for (size_t i = 0; i < ctx.nargs; ++i) {
            handles[i] = ctx.args[i]->m_handle.get();
        }

        auto output_handles = interpreter_for_py->apply_op(ctx.op, handles);

        apply_result_t outputs;
        outputs.reserve(output_handles.size());
        for (auto h : output_handles) {
            outputs.emplace_back(std::make_shared<Tensor>(h));
        }
        return outputs;
    }

    mgb_assert(0);
}
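
// Usage sketch (hypothetical caller, not part of this file; assumes an OpDef
// factory such as Elemwise::make and two live Tensor pointers `a` and `b`):
//
//     ApplyContext ctx;
//     ctx.flags = a->m_flags | b->m_flags;
//     ctx.op = Elemwise::make(Elemwise::Mode::ADD);
//     Tensor* args[] = {a, b};
//     ctx.args = args;
//     ctx.nargs = 2;
//     apply_result_t outputs = apply(ctx);  // dispatched to grad/trace/interpreter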

PyObject* py_apply(PyObject* self, PyObject*const* args, size_t nargs/* , PyObject* kwnames */) {
    try {
        // if (kwnames && PyTuple_GET_SIZE(kwnames)) {
        //     PyErr_SetString(PyExc_TypeError, "keyword argument not allowed");
        //     return nullptr;
        // }
        if (nargs < 2) {
            PyErr_SetString(PyExc_TypeError,
                            "py_apply expects one Op and at least one tensor "
                            "as argument");
            return nullptr;
        }

        auto* op = args[0];

        PyTypeObject* pytype = args[1]->ob_type;
        ++args;
        --nargs;

        ApplyContext ctx;
        ctx.flags = 0;
        ctx.op = py::handle(op).cast<std::shared_ptr<OpDef>>();
        SmallVector<Tensor*, 64> tensors(nargs);
        ctx.args = &tensors[0];
        ctx.nargs = nargs;
        ctx.pytype = pytype;
        if (strstr(op->ob_type->tp_name, "BackwardGraph")) {
            ctx.backward = true;
        }

        for (size_t i = 0; i < nargs; ++i) {
            if (TensorWrapper* tw = TensorWrapper::try_cast(args[i])) {
                auto* t = tensors[i] = tw->m_tensor.get();
                ctx.flags |= t->m_flags;
            } else {
                PyErr_SetString(PyExc_TypeError, "expect Tensor");
                return nullptr;
            }
        }

        if (is_tracing) {
            ctx.flags |= Tensor::Flags::TRACE;
        }

        auto outputs = apply(ctx);
        size_t nout = outputs.size();
        auto ret = py::tuple(nout);
        for (size_t i = 0; i < nout; ++i) {
            ret[i] = TensorWrapper::make(pytype, std::move(outputs[i]));
        }
        return ret.release().ptr();
    } catch (std::exception& e) {
        PyErr_SetString(PyExc_RuntimeError, e.what());
        return nullptr;
    }
}
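
// Exposed to Python (via MGE_PY_INTERFACE below) roughly as
// `apply(op, *tensors) -> tuple`: args[0] must be an OpDef and every remaining
// argument a Tensor; the outputs are wrapped with the same Python type as the
// first tensor argument.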


TensorWrapper::TensorWrapper(PyObject* args, PyObject* kwargs) {
    if (kwargs && PyDict_Size(kwargs)) {
        throw py::type_error("keyword argument not allowed");
    }
    auto nargs = PyTuple_Size(args);
    auto tup = py::reinterpret_borrow<py::tuple>(args);
    if (nargs == 0) {
        throw py::type_error("too few arguments");
    }
    if (auto* t = try_cast(tup[0].ptr())) {
        if (nargs > 1) {
            throw py::type_error("expect 1 argument");
        }
        m_tensor = t->m_tensor;
    } else {
        if (nargs == 1) {
            auto arg0 = PyTuple_GetItem(args, 0);
            // for lazy_eval_tensor
            if (strstr(arg0->ob_type->tp_name, "VarNode")) {
                if (PyObject_HasAttrString(arg0, "_node")) {
                    arg0 = PyObject_GetAttrString(arg0, "_node");
                }
                m_tensor = std::make_shared<Tensor>(py::handle(arg0).cast<cg::VarNode *>());
            } else {
                // for DeviceTensorND
                if (strstr(arg0->ob_type->tp_name, "DeviceTensorND")) {
                    auto dv = py::handle(arg0).cast<DeviceTensorND>();
                    interpreter::Interpreter::Handle handle = interpreter_for_py->put(dv);
                    m_tensor = std::make_shared<Tensor>(handle);
                } else {
                    throw py::type_error("single argument is not tensor, varnode or devicetensor");
                }
            }
        } else {
            py::detail::loader_life_support life_sup; // FIXME!!! required to cast DType
            if (nargs != 5 && nargs != 6) {
                throw py::type_error("expect 5 or 6 arguments");
            }
            auto data = tup[0].cast<py::array>();
            DType dtype = tup[1].cast<DType>();
            CompNode cn = tup[2].cast<CompNode>();
            bool is_const = tup[3].cast<bool>();
            bool no_cache = nargs == 6 ? tup[4].cast<bool>() : false;
            std::string name = tup[nargs - 1].cast<std::string>();

            // constant tensor while tracing: delegate to the Python-side const-apply hook
            if (is_const && is_tracing) {
                PyObject *pyf;
                if (is_compiled) {
                    pyf = cpp_apply_const_compiled_mode;
                } else {
                    pyf = cpp_apply_const_with_tracing;
                }

                auto py_ret = PyObject_Call(pyf, tup.ptr(), nullptr);
                if (!py_ret) throw py::error_already_set();
                auto py_list = py::reinterpret_steal<py::list>(py_ret);
                if (auto* t = try_cast(py_list[0].ptr())) {
                    m_tensor = t->m_tensor;
                }
                return;
            }

            interpreter::Interpreter::Handle handle;
            constexpr auto size_threshold = TensorShape::MAX_NDIM;
            if (data.size() > size_threshold) {
                handle = interpreter_for_py->put(npy::np2tensor(data.ptr(), npy::Meth::borrow(cn), dtype), no_cache);
            } else {
                HostTensorND ret(cn);
                handle = interpreter_for_py->put(npy::np2tensor(data.ptr(), npy::Meth::copy_into(&ret), dtype), no_cache);
            }

            m_tensor = std::make_shared<Tensor>(handle);
            m_tensor->user_custom_name = name;

            if (data.ndim() == 0) {
                m_tensor->m_flags |= Tensor::Flags::SCALAR;
            }
        }
    }
}
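
// The constructor above accepts the following argument patterns, matching the
// branches in order:
//   (Tensor)                     share the underlying tensor
//   (VarNode-like)               wrap a graph VarNode (for lazy_eval_tensor);
//                                an object exposing `_node` is unwrapped first
//   (DeviceTensorND)             adopt an existing device value
//   (array, dtype, comp_node, is_const, name)
//   (array, dtype, comp_node, is_const, no_cache, name)
//                                upload host data through the interpreter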


#define REGISTE_TENSORWRAPPER_FUNC(type, member)                                    \
        PyObject* TensorWrapper::member() {                                         \
            return py::cast(m_tensor->m_trace_info.member).release().ptr();         \
        }                                                                           \
        void TensorWrapper::set_##member(PyObject* dest) {                          \
            auto py_dest = py::reinterpret_borrow<py::object>(dest);                \
            type real_dest = py_dest.cast<type>();                                  \
            m_tensor->m_trace_info.member = real_dest;                              \
        }

REGISTE_TENSORWRAPPER_FUNC(int64_t, mixin_handle)
REGISTE_TENSORWRAPPER_FUNC(bool, recording)

#undef REGISTE_TENSORWRAPPER_FUNC
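
// For reference, REGISTE_TENSORWRAPPER_FUNC(bool, recording) expands to a
// getter that returns m_trace_info.recording as a new Python object and a
// setter set_recording(PyObject*) that casts its argument back to bool.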


PyObject* TensorWrapper::copied() {
    return py::cast(m_tensor->m_trace_info.copied).release().ptr();
}


#define REGISTE_TENSORWRAPPER_PYOBJECT_FUNC(member)                                 \
        PyObject* TensorWrapper::member() {                                         \
            if (m_tensor->m_trace_info.member) {                                    \
                return m_tensor->m_trace_info.member;                               \
            } else {                                                                \
                Py_RETURN_NONE;                                                     \
            }                                                                       \
        }                                                                           \
        void TensorWrapper::set_##member(PyObject* dest) {                          \
            if (dest == Py_None) {                                                  \
                Py_XDECREF(m_tensor->m_trace_info.member);                          \
                m_tensor->m_trace_info.member = nullptr;                            \
            } else {                                                                \
                Py_INCREF(dest);                                                    \
                m_tensor->m_trace_info.member = dest;                               \
            }                                                                       \
        }

REGISTE_TENSORWRAPPER_PYOBJECT_FUNC(compiled_info)
REGISTE_TENSORWRAPPER_PYOBJECT_FUNC(trace_mixin_info)

#undef REGISTE_TENSORWRAPPER_PYOBJECT_FUNC
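
// Ownership note for the PyObject accessors above: the setter takes a new
// reference on a non-None value, while assigning None releases whatever was
// stored; the getter returns the stored object, or None when unset.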


#define SET_GET_NAME(member)                                     \
    PyObject* TensorWrapper::member() {                          \
        return py::cast(m_tensor->member).release().ptr();       \
    }                                                            \
    void TensorWrapper::set_##member(PyObject* dest) {           \
        auto py_dest = py::reinterpret_borrow<py::object>(dest); \
        m_tensor->member = py_dest.cast<std::string>();          \
    }
SET_GET_NAME(user_custom_name)
SET_GET_NAME(automatic_name)
#undef SET_GET_NAME


PyObject* TensorWrapper::handle() {
    return py::cast(m_tensor->m_handle).release().ptr();
}


void TensorWrapper::set_handle(PyObject* dest) {
    auto py_dest = py::reinterpret_borrow<py::object>(dest);
    SharedHandle real_dest = py_dest.cast<SharedHandle>();
    m_tensor->m_handle = std::move(real_dest);
}


PyObject* TensorWrapper::shape() {
    // in trace's compiled mode, read the shape from compiled_info
    if (m_tensor->m_trace_info.compiled_info != nullptr) {
        if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
            return PyTuple_New(0);
        }
        PyObject *shp = PyObject_GetAttrString(m_tensor->m_trace_info.compiled_info, "shape");
        if (shp == Py_None) {
            throw TraceReadError("shape of this tensor is not read in trace");
        }
        return shp;
    }

    // while recording a trace, mark the shape as read so the trace records this dependency
    if (m_tensor->m_trace_info.recording && !skip_tracing) {
        PyObject_SetAttrString(m_tensor->m_trace_info.trace_mixin_info, "shape_read", py::cast(true).release().ptr());
    }

    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        return PyTuple_New(0);
    }

    TensorShape shape;
    if (m_tensor->m_var) {      // get shape from m_var
        auto&& mgr = m_tensor->m_var->owner_graph()->static_infer_manager();
        auto *tshp = mgr.infer_shape_fallible(m_tensor->m_var);
        if (!tshp) {
            Py_RETURN_NONE;
        }
        shape = *tshp;
    } else {
        shape = m_tensor->shape();
    }

    if (!shape.ndim) {
        Py_RETURN_NONE;
    }
    py::tuple ret(shape.ndim);
    for (size_t i = 0; i < shape.ndim; ++i) {
        ret[i] = shape[i];
    }
    return ret.release().ptr();
}


PyObject* TensorWrapper::dtype() {
    if (m_tensor->m_var) {
        return py::cast(m_tensor->m_var->dtype()).release().ptr();
    }
    return py::cast(m_tensor->dtype()).release().ptr();
}


PyObject* TensorWrapper::device() {
    if (m_tensor->m_var) {
        return py::cast(m_tensor->m_var->comp_node()).release().ptr();
    }
    return py::cast(m_tensor->comp_node()).release().ptr();
}


PyObject* TensorWrapper::numpy() {
    if (m_tensor->m_trace_info.compiled_info != nullptr) {
        PyObject* np_val = PyObject_CallMethod(m_tensor->m_trace_info.compiled_info, "numpy", nullptr);
        if (!np_val) throw py::error_already_set();
        if (np_val == Py_None) {
            throw TraceReadError("value of this tensor is not read in trace");
        }
        if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
            PyObject *np_scalar = PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(np_val));
            Py_DECREF(np_val);
            return np_scalar;
        }
        return np_val;
    }

    if (m_tensor->m_trace_info.recording && !skip_tracing) {
        PyObject_SetAttrString(m_tensor->m_trace_info.trace_mixin_info, "value_read", py::cast(true).release().ptr());
    }

    if (m_tensor->m_handle.get() == nullptr && m_tensor->m_var != nullptr) {
        auto&& mgr = m_tensor->m_var->owner_graph()->static_infer_manager();
        auto&& type = mgr.get_infer_type(m_tensor->m_var);
        using InferType = cg::static_infer::InferType;
        if (!(type.value & (InferType::CONST | InferType::RT_STATIC))) {
            PyErr_SetString(PyExc_ValueError, "tensor invalid");
            return nullptr;
        }
        auto* val = mgr.infer_value_fallible(m_tensor->m_var);
        if (!val) {
            PyErr_SetString(PyExc_ValueError, "tensor invalid");
            return nullptr;
        }
        auto np_val = py::cast(*val).attr("numpy")();
        if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
            return PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(np_val.release().ptr()));
        }
        return np_val.release().ptr();
    }
    auto&& hv = [&]() {
        py::gil_scoped_release _;
        return interpreter_for_py->get_value(m_tensor->m_handle.get());
    }();
    auto arr = py::reinterpret_steal<py::array>(npy::ndarray_from_tensor(hv, npy::ShareType::TRY_SHARE));
    if (!arr) {
        PyErr_SetString(PyExc_ValueError, "tensor invalid");
        return nullptr;
    }

    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        mgb_assert(PyArray_Check(arr.ptr()));
        return PyArray_Squeeze(reinterpret_cast<PyArrayObject*>(arr.ptr()));
    }
    return arr.release().ptr();
}
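
// Note: when the SCALAR flag is set, the ndarray is squeezed before being
// returned, so Python sees a 0-dim array rather than a shape-(1,) array.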

PyObject* TensorWrapper::varnode() {
    if (m_tensor->m_var) {
        return py::cast(m_tensor->m_var).release().ptr();
    }
    Py_RETURN_NONE;
}

void TensorWrapper::reset(PyObject* tensor) {
    TensorWrapper* t = TensorWrapper::try_cast(tensor);
    if (!t) {
        throw py::type_error("expect Tensor");
    }
    std::string user_custom_name = m_tensor->user_custom_name;
    std::string automatic_name = m_tensor->automatic_name;
    m_tensor = t->m_tensor;
    m_tensor->user_custom_name = user_custom_name;
    m_tensor->automatic_name = automatic_name;
}

void TensorWrapper::reset_varnode() {
    m_tensor->m_var = nullptr;
}

PyObject* TensorWrapper::detach() {
    PyObject* self = wrap_t::pycast(this);
    PyTypeObject* pytype = self->ob_type;

    std::shared_ptr<Tensor> new_tensor;
    if (m_tensor->m_handle.get()) {
        new_tensor = std::make_shared<Tensor>(m_tensor->m_handle);
    } else {
        new_tensor = std::make_shared<Tensor>(m_tensor->m_var);
    }
    new_tensor->m_trace_info = m_tensor->m_trace_info;
    auto ret = TensorWrapper::make(pytype, std::move(new_tensor));
    return ret.release().ptr();

}

PyObject* TensorWrapper::_dev_tensor() {
    if (m_tensor->m_trace_info.compiled_info != nullptr) {
        auto *dev_tensor = PyObject_CallMethod(m_tensor->m_trace_info.compiled_info, "_dev_tensor", nullptr);
        if (!dev_tensor) throw py::error_already_set();
        if (dev_tensor == Py_None) {
            throw TraceReadError("raw data of this tensor is not read in trace");
        }

        // set m_handle to make it a real tensor
        auto py_dev_tensor = py::reinterpret_borrow<py::object>(dev_tensor);
        auto sh = interpreter_for_py->put(py_dev_tensor.cast<DeviceTensorND>());
        m_tensor->m_handle = SharedHandle(sh);

        // compiled info is useless after m_handle is set
        Py_DECREF(m_tensor->m_trace_info.compiled_info);
        m_tensor->m_trace_info.compiled_info = nullptr;

        return dev_tensor;
    }
    if (m_tensor->m_trace_info.recording && !skip_tracing) {
        PyObject_SetAttrString(m_tensor->m_trace_info.trace_mixin_info, "data_read", py::cast(true).release().ptr());
    }
    auto dev_tensor = [&](){
        py::gil_scoped_release _;
        return interpreter_for_py->get_dev_tensor(m_tensor->m_handle.get());
    }();
    return py::cast(dev_tensor).release().ptr();
}

void TensorWrapper::_swap_out() {
    interpreter_for_py->swap_out(m_tensor->m_handle.get());
}

void TensorWrapper::_swap_in() {
    interpreter_for_py->swap_in(m_tensor->m_handle.get());
}

void TensorWrapper::_drop() {
    interpreter_for_py->drop(m_tensor->m_handle.get());
}


PyObject* TensorWrapper::isscalar() {
    if (m_tensor->m_flags & Tensor::Flags::SCALAR) {
        Py_RETURN_TRUE;
    } else {
        Py_RETURN_FALSE;
    }
}


void TensorWrapper::setscalar() {
    m_tensor->m_flags |= Tensor::Flags::SCALAR;
}


struct TensorWeakRef {
    std::weak_ptr<Tensor> wptr;

    TensorWeakRef(const TensorWrapper& tw) : wptr(tw.m_tensor) {}

    py::object operator()() {
        if (auto p = wptr.lock()) {
            return TensorWrapper::make(p);
        }
        return py::none();
    }
    int _use_cnt() { return wptr.use_count(); }
};
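
// Usage sketch (hypothetical, mirroring the Python binding registered in
// init_tensor below):
//
//     TensorWeakRef ref{*wrapper};        // wrapper: a live TensorWrapper
//     py::object t = ref();               // TensorWrapper instance, or None
//     int alive = ref._use_cnt();         // strong references still held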

/* ============== convert inputs ============== */

// map numpy.dtype.kind to priority
inline uint8_t category_priority(char c) {
    switch (c) {
        case 'f': return 3; // floating-point
        case 'i': return 2; // signed integer
        case 'u': return 2; // unsigned integer
        case 'b': return 1; // boolean
        default: return 0;
    }
}

// Returns the highest category priority among the types in `types`.
uint8_t max_priority(SmallVector<PyArray_Descr*> types) {
    if (types.size() == 0) {
        return 0;
    } else {
        uint8_t max_p = 0;
        for (auto&& desc: types) {
            max_p = std::max(max_p, category_priority(desc->kind));
        }
        return max_p;
    }
}

// Returns the data type with sufficient size to hold all types of
// category `cat` in the list `types`.
PyArray_Descr* promote_types(SmallVector<PyArray_Descr*> types, uint8_t cat) {
    // Return value: New reference
    SmallVector<PyArray_Descr*> used_types;
    for (auto&& desc: types) {
        auto&& v = category_priority(desc->kind);
        if (v == cat) {
            used_types.emplace_back(desc);
        }
    }
    mgb_assert(used_types.size() > 0, "size of used_types is 0");
    PyArray_Descr* res = used_types[0];
    Py_INCREF(res);

    for (size_t i = 1; i < used_types.size(); ++i) {
        PyArray_Descr* tmp = PyArray_PromoteTypes(used_types[i], res);
        Py_DECREF(res);
        res = tmp;
    }
    return res;
}

PyArray_Descr* scalar2dtype(PyObject* arg) {
    // Return value: New reference
    if (PyBool_Check(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_BOOL);
        return descr;
    }
    if (PyLong_CheckExact(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_INT32);
        return descr;
    }
    if (PyFloat_CheckExact(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_FLOAT32);
        return descr;
    }
    return nullptr;
}

PyArray_Descr* _dtype_promotion(PyObject*const* args, size_t nargs) {
    // Return value: New reference
    SmallVector<PyArray_Descr*> tensors;
    SmallVector<PyArray_Descr*> scalars;

    bool is_tuple = false;
    PyObject* tuple = nullptr;
    if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
        if (PyList_Check(args[0])) {
            tuple = PyList_AsTuple(args[0]);
        } else {
            tuple = args[0];
            Py_INCREF(tuple);
        }
        nargs = PyTuple_Size(tuple);
        is_tuple = true;
    }

    for (size_t i = 0; i < nargs; ++i) {
        PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i): args[i];
        if (handle == Py_None) continue;
        TensorWrapper* tw = TensorWrapper::try_cast(handle);
        if (tw) {
            mgb::DType type = tw->m_tensor->dtype();
            auto&& descr = npy::dtype_mgb2np_descr(type);
            Py_INCREF(descr.get());
            tensors.emplace_back(descr.get());
        } else {
            if (PyArray_Check(handle) || PyArray_CheckScalar(handle)) {
                auto&& descr = PyArray_DescrFromObject(handle, nullptr);
                tensors.emplace_back(descr);
                continue;
            }
            PyArray_Descr* descr = scalar2dtype(handle);
            if (descr) {
                scalars.emplace_back(descr);
                continue;
            }
        }
    }

    auto max_pri_scalars = max_priority(scalars);
    auto max_pri_tensors = max_priority(tensors);

    if (max_pri_scalars <= 0 && max_pri_tensors <= 0) {
        throw py::value_error("invalid input, no dtype avaliable");
    }
    PyArray_Descr* res;
    if (max_pri_scalars > max_pri_tensors) {
        res = promote_types(scalars, max_pri_scalars);
    } else {
        res = promote_types(tensors, max_pri_tensors);
    }
    for (auto *p: tensors) { Py_DECREF(p); }
    for (auto *p: scalars) { Py_DECREF(p); }
    Py_XDECREF(tuple);
    return res;
}
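
// Worked example (hypothetical inputs): mixing an int32 tensor with a Python
// float scalar gives tensor category 'i' (priority 2) vs scalar category 'f'
// (priority 3); the scalars win, so the promoted dtype is float32. With an
// int32 tensor and a Python int (both priority 2), the tensors win and the
// result stays int32.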

CompNode _get_device(PyObject*const* args, size_t nargs) {
    bool is_tuple = false;
    PyObject* tuple = nullptr;
    if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
        if (PyList_Check(args[0])) {
            tuple = PyList_AsTuple(args[0]);
        } else {
            tuple = args[0];
            Py_INCREF(tuple);
        }
        nargs = PyTuple_Size(tuple);
        is_tuple = true;
    }
    bool valid = false;
    CompNode cn;
    for (size_t i = 0; i < nargs; ++i) {
        PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i): args[i];
        TensorWrapper* tw = TensorWrapper::try_cast(handle);
        if (tw) {
            if (!valid) {
                cn = tw->m_tensor->comp_node();
                valid = true;
            } else {
                CompNode cn1 = tw->m_tensor->comp_node();
                if (cn1 != cn) {
                    throw py::value_error(ssprintf("ambiguous device: %s vs %s",
                        cn.to_string().c_str(), cn1.to_string().c_str()));
                }
            }
        }
    }
    if (!valid) {
        mgb_assert(0, "expect at least 1 device");
    }
    Py_XDECREF(tuple);
    return cn;
}
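
// Example: if every tensor argument lives on the same comp node, that node is
// returned; if two arguments disagree (say "xpu0" vs "cpu0"), a value error
// "ambiguous device: ..." is thrown; with no tensor argument at all the
// mgb_assert above fires.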

// Returns the dtype that would result from performing an arithmetic
// operation on the provided input tensors and scalars.
PyObject* dtype_promotion(PyObject* self, PyObject*const* args, size_t nargs) {
    if (!nargs) {
        PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
        return nullptr;
    }
    try {
        PyArray_Descr* res = _dtype_promotion(args, nargs);
        return py::cast(npy::dtype_np2mgb_descr(res)).release().ptr();
    } catch (std::exception& e) {
        PyErr_SetString(PyExc_RuntimeError, e.what());
        return nullptr;
    }
}

PyObject* get_device(PyObject* self, PyObject*const* args, size_t nargs) {
    if (!nargs) {
        PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
        return nullptr;
    }
    try {
        CompNode cn = _get_device(args, nargs);
        return py::cast(cn).release().ptr();
    } catch (std::exception& e) {
        PyErr_SetString(PyExc_RuntimeError, e.what());
        return nullptr;
    }
}

#ifdef METH_FASTCALL
#define MGE_PY_INTERFACE(NAME, FUNC) \
    { #NAME, (PyCFunction)FUNC, METH_FASTCALL, nullptr }
#else
#define WRAP_FUNC_PY35(FUNC)                                \
    PyObject* py35_##FUNC(PyObject* self, PyObject* args) { \
        auto* arr = &PyTuple_GET_ITEM(args, 0);             \
        auto size = PyTuple_GET_SIZE(args);                 \
        return FUNC(self, arr, size);                       \
    }
WRAP_FUNC_PY35(py_apply);
WRAP_FUNC_PY35(dtype_promotion);
WRAP_FUNC_PY35(get_device);
#undef WRAP_FUNC_PY35
#define MGE_PY_INTERFACE(NAME, FUNC) \
    { #NAME, (PyCFunction)py35_##FUNC, METH_VARARGS, nullptr }
#endif
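
// For reference, when METH_FASTCALL is available MGE_PY_INTERFACE(apply, py_apply)
// expands to the method-table entry:
//
//     { "apply", (PyCFunction)py_apply, METH_FASTCALL, nullptr }
//
// On older Pythons the py35_* wrapper converts a METH_VARARGS tuple back into
// the (args, nargs) fastcall signature before forwarding.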


void init_tensor(py::module m) {
    imperative::Tensor::static_initialize();
    static auto sl_interpreter_for_py = interpreter::Interpreter::inst().create_channel();
    interpreter_for_py = sl_interpreter_for_py.get();

    auto* tensor_type = TensorWrapper::wrap_t::type()
        .def<&TensorWrapper::numpy>("numpy")
        .def_getset<&TensorWrapper::shape>("shape")
        .def_getset<&TensorWrapper::dtype>("dtype")
        .def_getset<&TensorWrapper::device>("device")
        .def<&TensorWrapper::reset>("_reset")
        .def<&TensorWrapper::isscalar>("isscalar")
        .def<&TensorWrapper::setscalar>("setscalar")
        .def<&TensorWrapper::detach>("detach")
        .def<&TensorWrapper::_dev_tensor>("_dev_tensor")
        .def<&TensorWrapper::_swap_out>("_swap_out")
        .def<&TensorWrapper::_swap_in>("_swap_in")
        .def<&TensorWrapper::_drop>("_drop")
        .def<&TensorWrapper::reset_varnode>("_reset_varnode")
        .def<&TensorWrapper::_use_cnt>("_use_cnt")
        .def_getset<&TensorWrapper::varnode>("_varnode")
        .def_getset<&TensorWrapper::copied>("_copied")
        .def_getset<&TensorWrapper::mixin_handle, &TensorWrapper::set_mixin_handle>("_mixin_handle")
        .def_getset<&TensorWrapper::recording, &TensorWrapper::set_recording>("_recording")
        .def_getset<&TensorWrapper::handle, &TensorWrapper::set_handle>("_handle")
        .def_getset<&TensorWrapper::compiled_info, &TensorWrapper::set_compiled_info>("_compiled_info")
        .def_getset<&TensorWrapper::trace_mixin_info, &TensorWrapper::set_trace_mixin_info>("_trace_mixin_info")
        .def_getset<&TensorWrapper::user_custom_name, &TensorWrapper::set_user_custom_name>("c_name")
        .def_getset<&TensorWrapper::automatic_name, &TensorWrapper::set_automatic_name>("_name")
        .finalize();
    if (!tensor_type) throw py::error_already_set();
    py::setattr(m, "Tensor", tensor_type);

    py::class_<TensorWeakRef>(m, "TensorWeakRef")
        .def(py::init<const TensorWrapper&>())
        .def("__call__", &TensorWeakRef::operator())
        .def("_use_cnt", &TensorWeakRef::_use_cnt);

    static PyMethodDef method_defs[] = {
            MGE_PY_INTERFACE(apply, py_apply),
            MGE_PY_INTERFACE(dtype_promotion, dtype_promotion),
            MGE_PY_INTERFACE(get_device, get_device),
            {nullptr, nullptr, 0, nullptr}};
    for (auto&& def: method_defs) {
        if (def.ml_meth != nullptr) {
            auto* func = PyCFunction_NewEx(&def, nullptr, nullptr);
            if (!func) throw py::error_already_set();
            py::setattr(m, def.ml_name, func);
        }
    }

    m.def("set_option",
          [](std::string name, int value){ interpreter_for_py->set_option(name, value); });
    m.def("get_option",
          [](std::string name){ return interpreter_for_py->get_option(name); });
    m.def("_set_swap_flag",
          [](bool flag) { interpreter_for_py->set_option("enable_swap", flag); });
    m.def("_set_drop_flag",
          [](bool flag) { interpreter_for_py->set_option("enable_drop", flag); });
    m.def("config_async_level",
          [](int level) {
              mgb_assert(level >= 0 and level <= 2, "async_level should be 0, 1 or 2");
              interpreter_for_py->set_option("async_level", level);
          });
    m.def("get_async_level",
          []() { return interpreter_for_py->get_option("async_level"); });
    m.def("set_buffer_length",
          [](int length) {
              mgb_assert(length >= 0 and length < 100, "buffer_length should be in [0, 100)");
              interpreter_for_py->set_option("buffer_length", length);
          });
    m.def("push_scope",
          [](std::string name) { interpreter_for_py->push_scope(name); });
    m.def("pop_scope",
          [](std::string name) { interpreter_for_py->pop_scope(name); });
    m.def("start_profile",
          [](std::unordered_map<std::string, int> option) { return interpreter_for_py->start_profile(option); });
    m.def("stop_profile",
          [](std::string basename, std::string format) { interpreter_for_py->stop_profile(basename, format); });
    m.def("sync",
          []() {
              interpreter_for_py->sync();
              py_task_q.wait_all_task_finish();
          },
          py::call_guard<py::gil_scoped_release>());
    m.def("full_sync",
          []() {
              interpreter_for_py->sync();
              CompNode::sync_all();
              py_task_q.wait_all_task_finish();
          },
          py::call_guard<py::gil_scoped_release>());

    py::handle grad_key_type = GradKeyWrapper::wrap_t::type()
        .def<&GradKeyWrapper::attach>("attach")
        .def<&GradKeyWrapper::is_attached_to>("is_attached_to")
        .def_getset<&GradKeyWrapper::get_name, &GradKeyWrapper::set_name>("name")
        .finalize();
    if (!grad_key_type) throw py::error_already_set();
    py::setattr(m, "GradKey", grad_key_type);
    m.def("backward", &GradKeyWrapper::backward);

    m.def("set_cpp_apply_with_tracing", &set_cpp_apply_with_tracing);
    m.def("set_cpp_apply_const_with_tracing", &set_cpp_apply_const_with_tracing);
    m.def("set_cpp_apply_compiled_mode", &set_cpp_apply_compiled_mode);
    m.def("set_cpp_apply_const_compiled_mode", &set_cpp_apply_const_compiled_mode);
    m.def("set_cpp_apply_backward_varnode", &set_cpp_apply_backward_varnode);

    m.attr("skip_tracing") = &skip_tracing;

    py::class_<SharedHandle>(m, "SharedHandle")
        .def(py::init<const SharedHandle&>())
        .def("__eq__", [](SharedHandle &thish, SharedHandle &thath) {
            return (thish.get() == thath.get());
        })
        .def("__hash__", [](SharedHandle &sh) {
            return reinterpret_cast<int64_t>(sh.get());
        })
        ;

    m.def("set_tracing", &set_tracing);
    m.def("unset_tracing", &unset_tracing);
    m.def("set_compiled", &set_compiled);
    m.def("unset_compiled", &unset_compiled);
}

#undef MGE_PY_INTERFACE

} // namespace mgb::imperative::python