diff --git a/imperative/python/megengine/jit/tracing.py b/imperative/python/megengine/jit/tracing.py
index f24bfd74..c1662fbf 100644
--- a/imperative/python/megengine/jit/tracing.py
+++ b/imperative/python/megengine/jit/tracing.py
@@ -725,6 +725,12 @@ class trace:
             raise RuntimeError("trace is not set with profiling=True")
         return json.loads(self._profiler.get())
 
+    def trace(self, *args, **kwargs):
+        raise NotImplementedError(
+            "trace is deemed unbeneficial with the new "
+            "tracing mechanism. You should always use __call__."
+        )
+
 
 class CompiledTensorProxy(RawTensor):
     """
diff --git a/imperative/python/megengine/module/module.py b/imperative/python/megengine/module/module.py
index 9b1dbc28..bf87be9d 100644
--- a/imperative/python/megengine/module/module.py
+++ b/imperative/python/megengine/module/module.py
@@ -174,7 +174,11 @@ class Module(metaclass=ABCMeta):
         if "requires_grad" in kwargs:
             del kwargs["requires_grad"]
-            warnings.warn("passing requires_grad has no effect currently")
+            warnings.warn(
+                "Tensor currently has no requires_grad attribute "
+                "so requires_grad argument is ignored here",
+                DeprecationWarning,
+            )
 
         def predicate(obj) -> bool:
             return _is_parameter(obj)
 
@@ -197,7 +201,11 @@ class Module(metaclass=ABCMeta):
         if "requires_grad" in kwargs:
             del kwargs["requires_grad"]
-            warnings.warn("passing requires_grad has no effect currently")
+            warnings.warn(
+                "Tensor currently has no requires_grad attribute "
+                "so requires_grad argument is ignored here",
+                DeprecationWarning,
+            )
 
         def predicate(obj) -> bool:
             return _is_parameter(obj)
 
@@ -339,6 +347,7 @@ class Module(metaclass=ABCMeta):
 
         self.apply(fn)
 
+    @deprecated(version="1.0")
     def replace_param(
         self, params: dict, start_pos: int, seen: Optional[Set[int]] = None
     ):
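For reference, the `trace` method stubbed out above is replaced by invoking the traced object directly. A minimal usage sketch, assuming a function decorated with `@trace` (decorator options such as `symbolic` are omitted here):

```python
import numpy as np

import megengine.functional as F
from megengine import tensor
from megengine.jit import trace


@trace
def step(x):
    return F.relu(x)


# The traced function is executed through __call__;
# step.trace(...) now raises NotImplementedError.
out = step(tensor(np.random.randn(4).astype("float32")))
```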
""" for param_group in self.param_groups: @@ -224,3 +232,9 @@ class Optimizer(metaclass=ABCMeta): "loaded state dict contains a state that doesn't match " "the size of optimizer's state" ) + + def backward(self, loss): + raise NotImplementedError("use autodiff.GradManager instead") + + def bcast_param(self): + raise NotImplementedError("use distributed.bcast_list_ instead") diff --git a/imperative/python/megengine/tensor.py b/imperative/python/megengine/tensor.py index 726236e3..a8eae821 100644 --- a/imperative/python/megengine/tensor.py +++ b/imperative/python/megengine/tensor.py @@ -13,6 +13,7 @@ import collections from .core import Tensor as _Tensor from .core.ops.builtin import Copy from .core.tensor.core import apply +from .core.tensor.raw_tensor import as_device from .device import get_default_device from .utils.deprecation import deprecated @@ -35,7 +36,8 @@ class Tensor(_Tensor): def reset_zero(self): self *= 0 - def to(self, cn): + def to(self, device): + cn = as_device(device).to_c() return apply(Copy(comp_node=cn), self)[0] @property