
test_advance_indexing.py

# -*- coding: utf-8 -*-
import numpy as np

import megengine
import megengine.autodiff as ad
import megengine.optimizer as optimizer
from megengine import Parameter, tensor
from megengine.module import Module


class Simple(Module):
    def __init__(self):
        super().__init__()
        self.a = Parameter([1.0], dtype=np.float32)

    def forward(self, x, y):
        # Advanced (boolean-mask) indexing followed by a parameterized scale.
        x = x[y] * self.a
        return x


class Simple2(Module):
    def __init__(self):
        super().__init__()
        self.a = Parameter([1.0], dtype=np.float32)

    def forward(self, x):
        # Mixed indexing: integer index, ellipsis, full slice, and strided slices.
        x = x[1, ..., :, 0:4:2, 0:2] * self.a
        return x


def test_advance_indexing():
    net = Simple()

    gm = ad.GradManager().attach(net.parameters())
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    optim.clear_grad()

    dshape = (10, 10)
    raw_data = np.arange(100).reshape(dshape).astype(np.float32)
    raw_mask = (np.random.random_sample(dshape) > 0.5).astype(np.bool_)

    data = tensor(raw_data)
    mask = tensor(raw_mask)

    # loss = (data[mask] * a).sum() = a * data[mask].sum(), so d(loss)/d(a) = data[mask].sum();
    # one SGD step with lr=1.0 from a=1.0 leaves a = 1.0 - data[mask].sum().
    answer = 1.0 - raw_data[raw_mask].sum()
    with gm:
        loss = net(data, mask).sum()
        gm.backward(loss)
    optim.step()
    np.testing.assert_almost_equal(net.a.numpy(), np.array([answer]).astype(np.float32))


def test_advance_indexing_with_subtensor():
    net = Simple2()

    gm = ad.GradManager().attach(net.parameters())
    optim = optimizer.SGD(net.parameters(), lr=1.0)
    optim.clear_grad()

    dshape = (2, 3, 4, 3, 4, 2)
    raw_data = np.arange(576).reshape(dshape).astype(np.float32)

    data = tensor(raw_data)

    # Same reasoning as above: the gradient w.r.t. a is the sum over the indexed subtensor.
    answer = 1.0 - raw_data[1, ..., :, 0:4:2, 0:2].sum()
    with gm:
        loss = net(data).sum()
        gm.backward(loss)
    optim.step()
    np.testing.assert_almost_equal(net.a.numpy(), np.array([answer]).astype(np.float32))
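For reference, the expected value asserted in both tests follows from the gradient arithmetic: with loss = (x[idx] * a).sum() = a * x[idx].sum(), the gradient with respect to a is x[idx].sum(), and a single SGD step with lr=1.0 from a=1.0 gives a = 1.0 - x[idx].sum(). Below is a minimal NumPy-only sketch of that calculation (no MegEngine involved; it just re-derives the "answer" used in the first test and is not part of the test file itself):

import numpy as np

# Re-derive the expected parameter value from test_advance_indexing using only NumPy.
dshape = (10, 10)
raw_data = np.arange(100).reshape(dshape).astype(np.float32)
raw_mask = (np.random.random_sample(dshape) > 0.5).astype(np.bool_)

# loss(a) = (raw_data[raw_mask] * a).sum() = a * raw_data[raw_mask].sum()
grad_a = raw_data[raw_mask].sum()      # analytic d(loss)/d(a)
a_after_step = 1.0 - 1.0 * grad_a      # SGD update: a - lr * grad, with a = 1.0, lr = 1.0

expected = 1.0 - raw_data[raw_mask].sum()
assert np.isclose(a_after_step, expected)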