diff --git a/imperative/python/megengine/__init__.py b/imperative/python/megengine/__init__.py
index 5d86550bdfbb5ecc01fdb0d74e875ed62d9c72e5..60311783a3e3e10a0fd4bdd47317805f57944720 100644
--- a/imperative/python/megengine/__init__.py
+++ b/imperative/python/megengine/__init__.py
@@ -72,7 +72,7 @@ if sys.platform == "win32":
     kernel32.SetErrorMode(old_error_mode)
 
 from .core._imperative_rt.utils import _set_fork_exec_path_for_timed_func
-from .core._imperative_rt.imperative import sync
+from .core._imperative_rt.core2 import sync
 from .device import *
 from .logger import enable_debug_log, get_logger, set_log_file, set_log_level
 from .serialization import load, save
diff --git a/imperative/python/megengine/core/tensor/utils.py b/imperative/python/megengine/core/tensor/utils.py
index 0eed59ed1146ec061f4facfa1525c88ca9dac56d..52ef77318e559b7c5f75d3fceff3d42b827b1e6e 100644
--- a/imperative/python/megengine/core/tensor/utils.py
+++ b/imperative/python/megengine/core/tensor/utils.py
@@ -14,8 +14,8 @@ import numpy as np
 
 from .._imperative_rt.core2 import Tensor, apply
 from ..ops import builtin
 from ..ops.special import Const
-from ..tensor.core import OpBase, TensorBase, TensorWrapperBase
 from .dtype import is_equal, is_quantize
+from .megbrain_graph import VarNode
 
 _enable_convert_inputs = True
@@ -110,7 +110,7 @@ def dtype_promotion(inputs):
 def get_device(inputs):
     device = None
     for i in inputs:
-        if isinstance(i, Tensor):
+        if isinstance(i, (Tensor, VarNode)):
             if device is None:
                 device = i.device
             elif device != i.device:
@@ -142,9 +142,9 @@ def astype(x, dtype):
 
 
 def convert_single_value(v, inputs, *, dtype=None, device=None):
-    tensors = [i for i in inputs if isinstance(i, Tensor)]
+    tensors = [i for i in inputs if isinstance(i, (Tensor, VarNode))]
     assert len(tensors) > 0
-    if isinstance(v, (TensorWrapperBase, Tensor)):
+    if isinstance(v, (Tensor, VarNode)):
         v = astype(v, v.dtype if is_quantize(v.dtype) else dtype)
     else:
         (v,) = Const(v, dtype=dtype, device=device)(*tensors)
diff --git a/imperative/python/megengine/functional/tensor.py b/imperative/python/megengine/functional/tensor.py
index 081fef85cc9bb60aa0ec8db5d036765bb157d79a..69e0b33e0e07e89f2282d153b7222c703a5ae694 100644
--- a/imperative/python/megengine/functional/tensor.py
+++ b/imperative/python/megengine/functional/tensor.py
@@ -905,7 +905,6 @@ def linspace(
     stop = Tensor(stop, device=device)
     num = Tensor(num, device=device)
 
-    device = device if device is None else device.to_c()
     op = builtin.Linspace(comp_node=device)
     (result,) = apply(op, start, stop, num)
     if np.dtype(dtype) == np.int32:
diff --git a/imperative/python/megengine/tensor.py b/imperative/python/megengine/tensor.py
index ae66437d380bd763001bcab74e723fb9016e71e1..9f63b68805dfd073dea12b30a17b189dc7559c80 100644
--- a/imperative/python/megengine/tensor.py
+++ b/imperative/python/megengine/tensor.py
@@ -119,7 +119,6 @@ class Tensor(_Tensor, ArrayMethodMixin):
         self.q_dict = state.pop("qdict")
 
 
-
 tensor = Tensor
 
 
diff --git a/imperative/python/src/tensor.cpp b/imperative/python/src/tensor.cpp
index b123f73e6f5dabc464e10b767d7e81d45270d005..15582d06264e6d81736b5dd002f992a90079aa8a 100644
--- a/imperative/python/src/tensor.cpp
+++ b/imperative/python/src/tensor.cpp
@@ -16,7 +16,7 @@
 
 #include <pybind11/numpy.h>
 #include <pybind11/operators.h>
-
+#include "./helper.h"
 namespace py = pybind11;
 
 namespace mgb::imperative::python {
@@ -201,6 +201,24 @@ PyObject* TensorWrapper::detach() {
 
 }
 
+PyObject* TensorWrapper::_dev_tensor() {
+    auto dev_tensor = interpreter_for_py->get_dev_tensor(m_tensor->m_handle.get());
+    return py::cast(dev_tensor).release().ptr();
+}
+
+void TensorWrapper::_swap_out() {
+    interpreter_for_py->swap_out(m_tensor->m_handle.get());
+}
+
+void TensorWrapper::_swap_in() {
+    interpreter_for_py->swap_in(m_tensor->m_handle.get());
+}
+
+void TensorWrapper::_drop() {
+    interpreter_for_py->drop(m_tensor->m_handle.get());
+}
+
+
 PyObject* TensorWrapper::isscalar() {
     if(m_tensor->m_flags & Tensor::Flags::SCALAR) {
         Py_RETURN_TRUE;
@@ -240,6 +258,10 @@ void init_tensor(py::module m) {
         .def<&TensorWrapper::isscalar>("isscalar")
         .def<&TensorWrapper::setscalar>("setscalar")
         .def<&TensorWrapper::detach>("detach")
+        .def<&TensorWrapper::_dev_tensor>("_dev_tensor")
+        .def<&TensorWrapper::_swap_out>("_swap_out")
+        .def<&TensorWrapper::_swap_in>("_swap_in")
+        .def<&TensorWrapper::_drop>("_drop")
         .finalize();
     if (!tensor_type) throw py::error_already_set();
     py::setattr(m, "Tensor", tensor_type);
@@ -253,6 +275,21 @@ void init_tensor(py::module m) {
     if (!apply_func) throw py::error_already_set();
     py::setattr(m, "apply", apply_func);
 
+    m.def("_set_swap_flag",
+          [](bool flag) { interpreter_for_py->set_swap_flag(flag); });
+    m.def("_set_drop_flag",
+          [](bool flag) { interpreter_for_py->set_drop_flag(flag); });
+    m.def("config_async_level",
+          [](int level) { interpreter_for_py->config_async_level(level); });
+    m.def("get_async_level",
+          []() { return interpreter_for_py->get_async_level(); });
+    m.def("sync",
+          []() {
+              interpreter_for_py->sync();
+              py_task_q.wait_all_task_finish();
+          },
+          py::call_guard<py::gil_scoped_release>());
+
     py::handle grad_key_type = GradKeyWrapper::wrap_t::type()
         .def<&GradKeyWrapper::attach>("attach")
         .finalize();
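
Note: the hunks above retarget the Python entry points from the retired _imperative_rt.imperative module to _imperative_rt.core2 and expose the interpreter's swap/drop machinery: _dev_tensor, _swap_out, _swap_in and _drop on the tensor wrapper, plus _set_swap_flag, _set_drop_flag, config_async_level, get_async_level and sync at module level. A minimal sketch of how that surface is driven from Python, assuming the semantics implied by the names (swap parks a tensor's storage in host memory, drop discards it for later recomputation); the call sequence is illustrative, not taken from this patch:

    import numpy as np

    from megengine import Tensor
    from megengine.core._imperative_rt.core2 import (
        _set_drop_flag,
        _set_swap_flag,
        sync,
    )

    _set_swap_flag(True)  # allow the interpreter to swap tensors out to host memory
    _set_drop_flag(True)  # allow the interpreter to drop and later recompute tensors

    x = Tensor(np.random.randn(16).astype("float32"))
    x._swap_out()  # release the device buffer; contents live on the host for now
    x._swap_in()   # bring the storage back onto the device
    x._drop()      # discard the buffer so the interpreter can recompute it on demand

    sync()  # block until queued kernels and pending Python callbacks finish
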
diff --git a/imperative/python/src/tensor.h b/imperative/python/src/tensor.h
index 26054a76f657134fdabbdd6694fff28c576af65c..f5ab62f4b09c40f743f446bdb35de7b78dd1d00c 100644
--- a/imperative/python/src/tensor.h
+++ b/imperative/python/src/tensor.h
@@ -131,6 +131,10 @@ struct TensorWrapper {
     PyObject* detach();
     PyObject* isscalar();
     void setscalar();
+    PyObject* _dev_tensor();
+    void _swap_in();
+    void _swap_out();
+    void _drop();
 };
 
 
diff --git a/imperative/python/test/integration/test_converge_with_swap_and_drop.py b/imperative/python/test/integration/test_converge_with_swap_and_drop.py
index 58628c0e3990a29fafad79758ed2caa0249c069f..3da5ae55340172f4880d1b71022f8d85255a4b96 100644
--- a/imperative/python/test/integration/test_converge_with_swap_and_drop.py
+++ b/imperative/python/test/integration/test_converge_with_swap_and_drop.py
@@ -15,7 +15,7 @@ import megengine as mge
 import megengine.autodiff as ad
 import megengine.functional as F
 from megengine import Tensor
-from megengine.core._imperative_rt.imperative import _set_drop_flag, _set_swap_flag
+from megengine.core._imperative_rt.core2 import _set_drop_flag, _set_swap_flag
 from megengine.module import Linear, Module
 from megengine.optimizer import SGD
 
diff --git a/imperative/python/test/unit/core/test_async_level.py b/imperative/python/test/unit/core/test_async_level.py
index f5a761f130217b0527f2f2a6b7659ebb0d8171b9..72cee0e8d24f9f652671d08f419050c7e70c1d27 100644
--- a/imperative/python/test/unit/core/test_async_level.py
+++ b/imperative/python/test/unit/core/test_async_level.py
@@ -2,7 +2,7 @@ import pytest
 
 import megengine as mge
 import megengine.functional as F
-from megengine.core._imperative_rt.imperative import config_async_level, get_async_level
+from megengine.core._imperative_rt.core2 import config_async_level, get_async_level
 
 
 def test_basic():
@@ -12,7 +12,6 @@ def test_basic():
     config_async_level(3)
 
 
-@pytest.mark.skip
 def test_level1_infer_value():
     config_async_level(1)
     a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")
@@ -23,7 +22,6 @@ def test_level1_infer_value():
     d = F.reshape(a, c)
 
 
-@pytest.mark.skip
 def test_level1_infer_shape_with_unknown():
     config_async_level(2)
     a = mge.tensor([[1, 2, 2, 3]], dtype="float32")
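
Note: removing the two @pytest.mark.skip markers re-enables the async-level cases against the core2 interpreter. The statements between the two hunks (the construction of b and c in test_level1_infer_value) are outside this patch, so the condensed restatement below substitutes illustrative values of its own; the point of the case is that F.reshape must read the runtime value of its shape argument before the op can be issued:

    import megengine as mge
    import megengine.functional as F
    from megengine.core._imperative_rt.core2 import config_async_level

    config_async_level(1)  # run the case at async level 1
    a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")  # shape (3, 2)
    c = mge.tensor([2, 3], dtype="int32")  # target shape carried as tensor values
    d = F.reshape(a, c)  # deducing d's shape requires the value of c
    assert d.numpy().shape == (2, 3)
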
diff --git a/imperative/python/test/unit/core/test_megbrain_graph.py b/imperative/python/test/unit/core/test_megbrain_graph.py
index e79976064d7f44caeac88c733f7973bd67991825..2b3f4699d72de8d98caaca8061048434bcc5c9a2 100644
--- a/imperative/python/test/unit/core/test_megbrain_graph.py
+++ b/imperative/python/test/unit/core/test_megbrain_graph.py
@@ -11,17 +11,13 @@ from concurrent.futures import Future
 import numpy as np
 
 import megengine.functional as F
+from megengine import Tensor
 from megengine.core.tensor import megbrain_graph as mgb_graph
-from megengine.core.tensor.raw_tensor import as_raw_tensor
-
-
-def make_dev_tensor(value, dtype=None, device=None):
-    return as_raw_tensor(value, dtype=dtype, device=device)._dev_tensor()
 
 
 def test_io():
     g = mgb_graph.Graph()
-    x = make_dev_tensor(np.random.randn(3).astype("float32"), device="xpux")
+    x = Tensor(np.random.randn(3).astype("float32"), device="xpux")._dev_tensor()
     vx, _ = mgb_graph.input_callback(
         lambda: x, device=x.comp_node, dtype=x.dtype, graph=g
     )
@@ -43,7 +39,7 @@ def test_io2():
 
     for _ in range(3):
         f.execute()
-        x = make_dev_tensor(np.random.randn(10).astype(dtype), device=device)
+        x = Tensor(np.random.randn(10).astype(dtype), device=device)._dev_tensor()
         px.set_value(x)
         y = py.get_value()
         np.testing.assert_equal(x.numpy(), y.numpy())
@@ -60,7 +56,7 @@ def test_attr_output():
 
     for shape in [(2,), (3,), (5,)]:
         f.execute()
-        x = make_dev_tensor(np.random.randn(*shape).astype(dtype), device=device)
+        x = Tensor(np.random.randn(*shape).astype(dtype), device=device)._dev_tensor()
         px.set_value(x)
         ay = py.get_value()
         assert ay.shape == shape
@@ -71,7 +67,7 @@ def test_op():
     g = mgb_graph.Graph()
-    x = make_dev_tensor(np.random.randn(10).astype("float32"), device="xpux")
+    x = Tensor(np.random.randn(10).astype("float32"), device="xpux")._dev_tensor()
     v, _ = mgb_graph.input_callback(
         lambda: x, device=x.comp_node, dtype=x.dtype, graph=g
     )
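
Note: make_dev_tensor existed only to reach _dev_tensor() through the old raw_tensor wrapper; with _dev_tensor() now bound directly on Tensor (see tensor.cpp above), the tests inline the call. Restated from test_io, the pattern is:

    import numpy as np

    from megengine import Tensor
    from megengine.core.tensor import megbrain_graph as mgb_graph

    g = mgb_graph.Graph()
    # _dev_tensor() hands back the underlying device tensor, which the
    # graph-level input callback consumes directly.
    x = Tensor(np.random.randn(3).astype("float32"), device="xpux")._dev_tensor()
    vx, _ = mgb_graph.input_callback(
        lambda: x, device=x.comp_node, dtype=x.dtype, graph=g
    )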