Commit 4ab259f5 authored by Megvii Engine Team

refactor(mge): add some deprecation and migration guides

GitOrigin-RevId: 4793adf02bc188e6b745952a01c497391854b291
Parent 715009b5
@@ -725,6 +725,12 @@ class trace:
             raise RuntimeError("trace is not set with profiling=True")
         return json.loads(self._profiler.get())
+    def trace(self, *args, **kwargs):
+        raise NotImplementedError(
+            "trace is deemed unbeneficial with the new "
+            "tracing mechanism. You should always use __call__."
+        )

 class CompiledTensorProxy(RawTensor):
     """
...
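Migration note: under the new tracing mechanism a traced function is executed through __call__; the explicit trace() method above is kept only as a stub that raises NotImplementedError. A minimal hedged sketch of the migration, assuming the megengine.jit.trace decorator:

    import megengine as mge
    from megengine.jit import trace

    @trace
    def double(x):
        return x * 2.0

    x = mge.tensor([1.0, 2.0])
    # old style, now raises NotImplementedError:
    #   y = double.trace(x)
    # new style: calling the traced function performs the tracing
    y = double(x)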
@@ -174,7 +174,11 @@ class Module(metaclass=ABCMeta):
         if "requires_grad" in kwargs:
             del kwargs["requires_grad"]
-            warnings.warn("passing requires_grad has no effect currently")
+            warnings.warn(
+                "Tensor currently has no requires_grad attribute "
+                "so requires_grad argument is ignored here",
+                DeprecationWarning,
+            )

         def predicate(obj) -> bool:
             return _is_parameter(obj)
@@ -197,7 +201,11 @@ class Module(metaclass=ABCMeta):
         if "requires_grad" in kwargs:
             del kwargs["requires_grad"]
-            warnings.warn("passing requires_grad has no effect currently")
+            warnings.warn(
+                "Tensor currently has no requires_grad attribute "
+                "so requires_grad argument is ignored here",
+                DeprecationWarning,
+            )

         def predicate(obj) -> bool:
             return _is_parameter(obj)
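Migration note: the two hunks above turn the old plain warning into a DeprecationWarning when requires_grad is passed (presumably to parameters() and named_parameters()); the argument is still accepted but ignored. A hedged sketch of the observable behavior, where M.Linear stands in for any Module subclass:

    import warnings

    import megengine.module as M

    m = M.Linear(4, 2)
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        # requires_grad is silently dropped; only a DeprecationWarning is emitted
        params = list(m.parameters(requires_grad=True))
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)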
@@ -339,6 +347,7 @@ class Module(metaclass=ABCMeta):
         self.apply(fn)

+    @deprecated(version="1.0")
     def replace_param(
         self, params: dict, start_pos: int, seen: Optional[Set[int]] = None
     ):
...
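Note: the @deprecated decorator used above (and imported in optimizer.py below) lives in megengine.utils.deprecation, whose implementation is not part of this diff. As a purely hypothetical sketch, such a decorator usually wraps the callable and emits a DeprecationWarning on each invocation:

    import functools
    import warnings

    def deprecated(version, reason=None):
        # Hypothetical sketch only; the real megengine.utils.deprecation
        # helper may differ in signature and message wording.
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                msg = "{} is deprecated since version {}".format(func.__name__, version)
                if reason:
                    msg += "; " + reason
                warnings.warn(msg, DeprecationWarning, stacklevel=2)
                return func(*args, **kwargs)
            return wrapper
        return decorator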
@@ -16,6 +16,7 @@ from typing import Union
 import numpy as np

 from ..tensor import Parameter, Tensor
+from ..utils.deprecation import deprecated


 class _RequiredParameter:
@@ -149,8 +150,15 @@ class Optimizer(metaclass=ABCMeta):
             self._updates(group)
         return self

+    @deprecated(version="1.0", reason="use clear_grad instead")
+    def zero_grad(self):
+        for param_group in self.param_groups:
+            for param in param_group["params"]:
+                if param.grad is not None:
+                    param.grad.reset_zero()
+
     def clear_grad(self):
-        r"""Clear the grad buffer.
+        r"""Set the grad attribute to None for all parameters.
         """
         for param_group in self.param_groups:
@@ -224,3 +232,9 @@ class Optimizer(metaclass=ABCMeta):
                 "loaded state dict contains a state that doesn't match "
                 "the size of optimizer's state"
             )
+
+    def backward(self, loss):
+        raise NotImplementedError("use autodiff.GradManager instead")
+
+    def bcast_param(self):
+        raise NotImplementedError("use distributed.bcast_list_ instead")
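Migration note: the two stubs above move gradient computation out of the optimizer. Backpropagation is handled by autodiff.GradManager, and clear_grad() replaces the deprecated zero_grad(). A hedged sketch of the resulting training loop; model, dataset and compute_loss are placeholders, not library API:

    import megengine.autodiff as autodiff
    import megengine.optimizer as optim

    gm = autodiff.GradManager()
    gm.attach(model.parameters())
    opt = optim.SGD(model.parameters(), lr=0.01)

    for data, label in dataset:
        with gm:                   # record the forward pass for autodiff
            loss = compute_loss(model(data), label)
            gm.backward(loss)      # replaces optimizer.backward(loss)
        opt.step()
        opt.clear_grad()           # replaces the deprecated zero_grad()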
@@ -13,6 +13,7 @@ import collections
 from .core import Tensor as _Tensor
 from .core.ops.builtin import Copy
 from .core.tensor.core import apply
+from .core.tensor.raw_tensor import as_device
 from .device import get_default_device
 from .utils.deprecation import deprecated
@@ -35,7 +36,8 @@ class Tensor(_Tensor):
     def reset_zero(self):
         self *= 0

-    def to(self, cn):
+    def to(self, device):
+        cn = as_device(device).to_c()
         return apply(Copy(comp_node=cn), self)[0]

     @property
...
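Migration note: Tensor.to() now takes a device specifier and normalizes it through as_device(device).to_c(), rather than expecting a raw computing node. A minimal hedged sketch, assuming "cpu0" is a valid device string:

    import megengine as mge

    x = mge.tensor([1.0, 2.0, 3.0])
    y = x.to("cpu0")  # copy x to the given device; device specs are accepted now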