
test_sgd_momentum.py

# -*- coding: utf-8 -*-
import numpy as np
import pytest

import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.jit import trace
from megengine.module import Module


class Simple(Module):
    """A one-parameter module: forward(x) = x * a, so dloss/da = x."""

    def __init__(self):
        super().__init__()
        self.a = Parameter([1.23], dtype="float32")

    def forward(self, x):
        x = x * self.a
        return x


@pytest.mark.parametrize("trace_mode", [True, False, None])
@pytest.mark.parametrize("inplace_mode", [True, False])
def test_sgd_momentum(monkeypatch, trace_mode, inplace_mode):
    with monkeypatch.context() as mk:
        mk.setenv("MEGENGINE_INPLACE_UPDATE", str(int(inplace_mode)))

        # Both closures read ``net`` from the enclosing scope; it is
        # assigned below, before either function is first called.
        def train_func(data, *, model=None, optim=None, gm=None):
            optim.clear_grad()
            with gm:
                loss = net(data)
                gm.backward(loss)
            optim.step()
            return loss

        if trace_mode is not None:
            train_func = trace(symbolic=trace_mode)(train_func)

        def eval_func(data, *, model=None, optim=None, gm=None):
            loss = net(data)
            return loss

        if trace_mode is not None:
            eval_func = trace(symbolic=trace_mode)(eval_func)

        net = Simple()
        optim = optimizer.SGD(net.parameters(), lr=1.0, momentum=0.9)
        gm = ad.GradManager().attach(net.parameters())
        data = tensor([2.34])

        # one step of train: the momentum buffer holds the raw gradient
        train_func(data, model=net, optim=optim, gm=gm)
        np.testing.assert_almost_equal(
            optim._state[net.a]["momentum_buffer"].numpy(), 2.34
        )

        # three steps of infer: neither the parameter nor the buffer moves
        for _ in range(3):
            loss = eval_func(data)
            np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
            np.testing.assert_almost_equal(
                optim._state[net.a]["momentum_buffer"].numpy(), 2.34
            )

        # one more step of train: the buffer decays and accumulates
        train_func(data, model=net, optim=optim, gm=gm)
        np.testing.assert_almost_equal(loss.numpy(), 2.34 * (1.23 - 2.34), 5)
        np.testing.assert_almost_equal(
            optim._state[net.a]["momentum_buffer"].numpy(), 0.9 * 2.34 + 2.34, 5
        )
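
The constants in the assertions follow from one line of calculus: the loss is x * a, so the gradient with respect to a is just x = 2.34, independent of a. Below is a minimal NumPy sketch, not MegEngine code, that reproduces every asserted value by hand, assuming the conventional SGD-with-momentum update v <- momentum * v + grad, param <- param - lr * v, which is the rule the test's assertions encode.

import numpy as np

# Hypothetical by-hand replay of the test's expected values.
lr, momentum = 1.0, 0.9
a, x = 1.23, 2.34   # parameter and input, as in the test
v = 0.0             # momentum buffer starts at zero

# loss = x * a, so dloss/da = x, regardless of the current a
grad = x

# first train step
v = momentum * v + grad   # 0.9 * 0 + 2.34 = 2.34 -> first buffer assertion
a = a - lr * v            # 1.23 - 2.34 = -1.11

# eval steps touch neither the parameter nor the buffer
loss = x * a              # 2.34 * (1.23 - 2.34) -> the eval-loop assertion

# second train step: the buffer decays by 0.9 and accumulates the gradient
v = momentum * v + grad   # 0.9 * 2.34 + 2.34 = 4.446 -> final buffer assertion
a = a - lr * v

np.testing.assert_almost_equal(loss, 2.34 * (1.23 - 2.34), 5)
np.testing.assert_almost_equal(v, 0.9 * 2.34 + 2.34, 5)

Because the gradient is constant, the buffer values depend only on the step count, which is why the test can assert exact numbers across all trace_mode and inplace_mode combinations.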