diff --git a/imperative/python/megengine/autodiff/grad_manager.py b/imperative/python/megengine/autodiff/grad_manager.py
index db03f235..0cd44b38 100644
--- a/imperative/python/megengine/autodiff/grad_manager.py
+++ b/imperative/python/megengine/autodiff/grad_manager.py
@@ -115,14 +115,6 @@ class GradManager:
         else:
             logger.warning("params with index {} is not attached.".format(idx))
 
-    def clear_grad(self):
-        r"""
-        For advanced usage: set the grad attribute to None for registered parameters.
-        It could be more convenient when there is more than one Optimizer.
-        """
-        for param in self._param_dict.values():
-            param.grad = None
-
    def _register_after_backward_callback(self, callback):
        self._after_backward_callback.append(callback)
        return self
diff --git a/imperative/python/test/unit/autodiff/test_grad_manger.py b/imperative/python/test/unit/autodiff/test_grad_manger.py
index f47e618b..947fa520 100644
--- a/imperative/python/test/unit/autodiff/test_grad_manger.py
+++ b/imperative/python/test/unit/autodiff/test_grad_manger.py
@@ -37,7 +37,8 @@ def test_basic():
     np.testing.assert_equal(w.grad.numpy(), [[1], [3], [5]])
     np.testing.assert_equal(b.grad.numpy(), [1])
 
-    gm.clear_grad()
+    w.grad = None
+    b.grad = None
     with gm:
         p = F.matmul(x, w)
         y = p + b
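Migration note: with `GradManager.clear_grad` removed, callers reset gradients by assigning `None` to each tensor's `grad` attribute directly, exactly as the updated test now does. Below is a minimal sketch of that pattern under the assumption of a standalone script; the tensors `w` and `b` are hypothetical stand-ins for attached parameters (their values mirror the test), not part of this diff.

```python
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager

# Hypothetical parameters standing in for a model's weights.
w = mge.tensor([[2.0], [4.0], [6.0]])
b = mge.tensor(-1.0)

gm = GradManager().attach([w, b])

x = mge.tensor([[1.0, 3.0, 5.0]])
with gm:  # entering the context starts recording; exiting releases
    y = F.matmul(x, w) + b
    gm.backward(y)  # populates w.grad and b.grad

# Replacement for the removed gm.clear_grad(): reset each attached
# parameter's grad attribute by hand before the next recorded pass.
for param in (w, b):
    param.grad = None
```

In the multi-optimizer scenario the removed docstring mentioned, each optimizer can perform this reset over its own parameter list, so the explicit per-parameter assignment covers that use case without a dedicated `GradManager` method.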