
test_observer.py 3.5 kB

import numpy as np
import pytest

import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization import QuantMode, create_qparams
from megengine.quantization.observer import (
    ExponentialMovingAverageObserver,
    HistogramObserver,
    MinMaxObserver,
    Observer,
    PassiveObserver,
    SyncExponentialMovingAverageObserver,
    SyncMinMaxObserver,
)


def test_observer():
    # Observer is an abstract base class and must not be instantiated directly.
    with pytest.raises(TypeError):
        Observer("qint8")


def test_min_max_observer():
    x = np.random.rand(3, 3, 3, 3).astype("float32")
    np_min, np_max = x.min(), x.max()
    x = mge.tensor(x)
    m = MinMaxObserver()
    m(x)
    np.testing.assert_allclose(m.min_val.numpy(), np_min)
    np.testing.assert_allclose(m.max_val.numpy(), np_max)


def test_exponential_moving_average_observer():
    t = np.random.rand()
    x1 = np.random.rand(3, 3, 3, 3).astype("float32")
    x2 = np.random.rand(3, 3, 3, 3).astype("float32")
    # The first batch initializes the statistics; the second update blends
    # them with momentum t, so the expected values are a weighted average.
    expected_min = x1.min() * t + x2.min() * (1 - t)
    expected_max = x1.max() * t + x2.max() * (1 - t)
    m = ExponentialMovingAverageObserver(momentum=t)
    m(mge.tensor(x1, dtype=np.float32))
    m(mge.tensor(x2, dtype=np.float32))
    np.testing.assert_allclose(m.min_val.numpy(), expected_min, atol=1e-5)
    np.testing.assert_allclose(m.max_val.numpy(), expected_max, atol=1e-5)


def test_histogram_observer():
    x = np.random.rand(3, 3, 3, 3).astype("float32")
    np_min, np_max = x.min(), x.max()
    x = mge.tensor(x)
    m = HistogramObserver()
    m(x)
    np.testing.assert_allclose(m.min_val.numpy(), np_min)
    np.testing.assert_allclose(m.max_val.numpy(), np_max)


def test_passive_observer():
    qparams = create_qparams(QuantMode.SYMMERTIC, "qint8", mge.tensor(1.0))
    m = PassiveObserver("qint8")
    m.set_qparams(qparams)
    assert m.orig_scale == 1.0
    assert m.scale.numpy() == 1.0
    assert m.get_qparams().dtype_meta == qparams.dtype_meta
    assert m.get_qparams().scale == qparams.scale
    assert m.get_qparams() == qparams


@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_sync_min_max_observer():
    world_size = get_device_count_by_fork("gpu")
    x = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    np_min, np_max = x.min(), x.max()

    @dist.launcher
    def worker():
        rank = dist.get_rank()
        m = SyncMinMaxObserver()
        # Each rank observes only its own slice of the input.
        y = mge.tensor(x[rank * 3 : (rank + 1) * 3])
        m(y)
        # Statistics are synchronized across ranks, so every worker sees the
        # global min/max of the full input.
        assert m.min_val == np_min and m.max_val == np_max

    worker()


@pytest.mark.require_ngpu(2)
@pytest.mark.isolated_distributed
def test_sync_exponential_moving_average_observer():
    world_size = get_device_count_by_fork("gpu")
    t = np.random.rand()
    x1 = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    x2 = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    expected_min = x1.min() * t + x2.min() * (1 - t)
    expected_max = x1.max() * t + x2.max() * (1 - t)

    @dist.launcher
    def worker():
        rank = dist.get_rank()
        m = SyncExponentialMovingAverageObserver(momentum=t)
        y1 = mge.tensor(x1[rank * 3 : (rank + 1) * 3])
        y2 = mge.tensor(x2[rank * 3 : (rank + 1) * 3])
        m(y1)
        m(y2)
        np.testing.assert_allclose(m.min_val.numpy(), expected_min, atol=1e-6)
        np.testing.assert_allclose(m.max_val.numpy(), expected_max, atol=1e-6)

    worker()
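
The tests above verify observers by reading min_val and max_val directly. In a typical post-training quantization flow, the observed range is instead turned into quantization parameters. The following sketch is illustrative only (not part of the test file) and assumes MinMaxObserver exposes get_qparams(), as PassiveObserver does in the tests above.

# Illustrative sketch: calibrate a MinMaxObserver on a few batches and read
# back the derived quantization parameters (assumes get_qparams() exists on
# MinMaxObserver, mirroring the PassiveObserver usage above).
import numpy as np
import megengine as mge
from megengine.quantization.observer import MinMaxObserver

observer = MinMaxObserver()
for _ in range(4):
    batch = mge.tensor(np.random.rand(8, 3, 32, 32).astype("float32"))
    observer(batch)  # updates the running min_val / max_val

qparams = observer.get_qparams()  # scale is derived from the observed range
print(qparams.scale)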

The MegEngine package bundles the CUDA runtime needed to run code on GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has a GPU installed along with working drivers. If you would like to try deep learning development on a cloud GPU platform, you are welcome to visit MegStudio.
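
Before running the GPU-marked tests above, it can help to confirm what devices MegEngine actually sees. The snippet below is a minimal sketch that assumes the top-level helpers megengine.is_cuda_available() and megengine.get_device_count() are present in your installed release.

# Minimal GPU-availability check (sketch; assumes is_cuda_available() and
# get_device_count() are exported by the installed MegEngine release).
import megengine as mge

if mge.is_cuda_available():
    print("GPUs visible to MegEngine:", mge.get_device_count("gpu"))
else:
    print("No GPU detected; code will run on CPU.")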