You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

test_module.py 9.0 kB

  1. from functools import partial
  2. import numpy as np
  3. import pytest
  4. import megengine as mge
  5. import megengine.functional as F
  6. import megengine.module as Float
  7. import megengine.module.qat as QAT
  8. import megengine.module.quantized as Q
  9. from megengine import Parameter, Tensor
  10. from megengine.core.tensor import dtype
  11. from megengine.quantization import (
  12. FakeQuantize,
  13. MinMaxObserver,
  14. QConfig,
  15. QuantMode,
  16. create_qparams,
  17. )
  18. from megengine.quantization.quantize import (
  19. disable_fake_quant,
  20. disable_observer,
  21. propagate_qconfig,
  22. )
  23. min_max_fakequant_qconfig = QConfig(
  24. weight_observer=partial(MinMaxObserver, dtype="qint8_narrow"),
  25. act_observer=partial(MinMaxObserver, dtype="qint8"),
  26. weight_fake_quant=partial(FakeQuantize, dtype="qint8_narrow"),
  27. act_fake_quant=partial(FakeQuantize, dtype="qint8"),
  28. )
  29. inp_scale = np.float32(np.random.rand() + 1)
  30. min_val = np.random.randint(-127, 0, size=(2,)).astype("float32")
  31. max_val = np.random.randint(1, 127, size=(2,)).astype("float32")
  32. weight_scale = np.float32(np.max([-min_val[0], max_val[0]]) / 254 * 2)
  33. act_scale = np.float32(np.max([-min_val[1], max_val[1]]) / 255 * 2)
  34. def quant(x, scale):
  35. inp_dtype = dtype.qint8(scale)
  36. return x.astype(inp_dtype)
  37. def fake_quant(x, scale, qmin, qmax):
  38. x = x / scale
  39. x = F.round(x)
  40. x = F.clip(x, qmin, qmax)
  41. x = x * scale
  42. return x
  43. fake_quant_act = partial(fake_quant, qmin=-128, qmax=127)
  44. fake_quant_weight = partial(fake_quant, qmin=-127, qmax=127)
  45. fake_quant_bias = partial(fake_quant, qmin=-(2 ** 31), qmax=2 ** 31 - 1)
  46. def init_qat_net(net):
  47. if net.with_weight:
  48. net.weight_observer.min_val[...] = Tensor(min_val[0])
  49. net.weight_observer.max_val[...] = Tensor(max_val[0])
  50. if net.with_act:
  51. net.act_observer.min_val[...] = Tensor(min_val[1])
  52. net.act_observer.max_val[...] = Tensor(max_val[1])
  53. def test_quant_stub():
  54. normal_net = Float.QuantStub()
  55. normal_net.eval()
  56. qat_from_float = QAT.QuantStub.from_float_module(normal_net)
  57. qat_from_float.eval()
  58. disable_observer(qat_from_float)
  59. disable_fake_quant(qat_from_float)
  60. qat_net = QAT.QuantStub()
  61. qat_net.eval()
  62. disable_observer(qat_net)
  63. propagate_qconfig(qat_net, min_max_fakequant_qconfig)
  64. init_qat_net(qat_net)
  65. q_net = Q.QuantStub.from_qat_module(qat_net)
  66. q_net.eval()
  67. x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
  68. normal = normal_net(x)
  69. qat_without_fakequant = qat_from_float(x)
  70. fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
  71. qat = qat_net(x)
  72. q = q_net(x).numpy() * act_scale
  73. np.testing.assert_allclose(qat_without_fakequant, normal)
  74. np.testing.assert_allclose(qat, fake_quant_normal)
  75. np.testing.assert_allclose(q, fake_quant_normal.numpy())
  76. def test_dequant_stub():
  77. normal_net = Float.DequantStub()
  78. normal_net.eval()
  79. qat_from_float = QAT.DequantStub.from_float_module(normal_net)
  80. qat_from_float.eval()
  81. disable_fake_quant(qat_from_float)
  82. disable_observer(qat_from_float)
  83. qat_net = QAT.DequantStub()
  84. qat_net.eval()
  85. disable_observer(qat_net)
  86. propagate_qconfig(qat_net, min_max_fakequant_qconfig)
  87. init_qat_net(qat_net)
  88. q_net = Q.DequantStub.from_qat_module(qat_net)
  89. q_net.eval()
  90. x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
  91. x = fake_quant_act(x, inp_scale)
  92. x.qparams.scale = inp_scale
  93. normal = normal_net(x)
  94. qat_without_fakequant = qat_from_float(x)
  95. fake_quant_normal = normal_net(x)
  96. qat = qat_net(x)
  97. q = q_net(quant(x, inp_scale)).numpy()
  98. np.testing.assert_allclose(qat_without_fakequant, normal)
  99. np.testing.assert_allclose(qat, fake_quant_normal)
  100. np.testing.assert_allclose(q, fake_quant_normal.numpy())
  101. @pytest.mark.parametrize("kind", ["COS", "RELU", "ADD", "MUL", "FUSE_ADD_RELU"])
  102. def test_elemwise(kind):
  103. normal_net = Float.Elemwise(kind)
  104. normal_net.eval()
  105. qat_from_float = QAT.Elemwise.from_float_module(normal_net)
  106. qat_from_float.eval()
  107. disable_observer(qat_from_float)
  108. disable_fake_quant(qat_from_float)
  109. qat_net = QAT.Elemwise(kind)
  110. qat_net.eval()
  111. disable_observer(qat_net)
  112. propagate_qconfig(qat_net, min_max_fakequant_qconfig)
  113. init_qat_net(qat_net)
  114. q_net = Q.Elemwise.from_qat_module(qat_net)
  115. q_net.eval()
  116. x1_scale = np.float32(np.random.rand() + 1)
  117. x1 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
  118. x1 = fake_quant_act(x1, x1_scale)
  119. x1.qparams.scale = x1_scale
  120. x2_scale = np.float32(np.random.rand() + 1)
  121. x2 = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
  122. x2 = fake_quant_act(x2, x2_scale)
  123. x2.qparams.scale = x2_scale
  124. x1_int8 = quant(x1, x1_scale)
  125. x2_int8 = quant(x2, x2_scale)
  126. # test correctness of `Float`, `QAT` and `Quantized`
  127. if kind in ("ADD", "MUL", "FUSE_ADD_RELU"):
  128. normal = normal_net(x1, x2)
  129. qat_without_fakequant = qat_from_float(x1, x2)
  130. fake_quant_normal = fake_quant_act(normal_net(x1, x2), act_scale)
  131. qat = qat_net(x1, x2)
  132. q = q_net(x1_int8, x2_int8).numpy() * act_scale
  133. else:
  134. normal = normal_net(x1)
  135. qat_without_fakequant = qat_from_float(x1)
  136. fake_quant_normal = fake_quant_act(normal_net(x1), act_scale)
  137. qat = qat_net(x1)
  138. q = q_net(x1_int8).numpy() * act_scale
  139. np.testing.assert_allclose(qat_without_fakequant, normal)
  140. np.testing.assert_allclose(qat, fake_quant_normal)
  141. np.testing.assert_allclose(q, fake_quant_normal.numpy())
  142. def test_linear():
  143. normal_net = Float.Linear(3, 3, bias=True)
  144. normal_net.eval()
  145. qat_net = QAT.Linear(3, 3, bias=True)
  146. qat_net.eval()
  147. disable_observer(qat_net)
  148. propagate_qconfig(qat_net, min_max_fakequant_qconfig)
  149. init_qat_net(qat_net)
  150. x = mge.tensor(np.random.normal(size=(3, 3)).astype("float32"))
  151. x = fake_quant_act(x, inp_scale)
  152. x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
  153. x_int8 = quant(x, inp_scale)
  154. weight = np.random.normal(size=(3, 3)).astype("float32")
  155. bias = np.random.normal(size=(3,)).astype("float32")
  156. normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
  157. normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
  158. qat_net.weight[...] = Parameter(weight)
  159. qat_net.bias[...] = Parameter(bias)
  160. qat_from_float = QAT.Linear.from_float_module(normal_net)
  161. qat_from_float.eval()
  162. disable_fake_quant(qat_from_float)
  163. disable_observer(qat_from_float)
  164. q_net = Q.Linear.from_qat_module(qat_net)
  165. q_net.eval()
  166. normal = normal_net(x)
  167. qat_without_fakequant = qat_from_float(x)
  168. fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
  169. qat = qat_net(x)
  170. q = q_net(x_int8).numpy() * act_scale
  171. np.testing.assert_allclose(qat_without_fakequant, normal)
  172. np.testing.assert_allclose(qat, fake_quant_normal.numpy())
  173. np.testing.assert_allclose(q, fake_quant_normal.numpy())
  174. @pytest.mark.parametrize("module", ["Conv2d", "ConvBn2d", "ConvBnRelu2d"])
  175. def test_conv(module):
  176. normal_net = getattr(Float, module)(3, 3, 3, 1, 1, 1, bias=True)
  177. normal_net.eval()
  178. qat_net = getattr(QAT, module)(3, 3, 3, 1, 1, 1, bias=True)
  179. qat_net.eval()
  180. disable_observer(qat_net)
  181. propagate_qconfig(qat_net, min_max_fakequant_qconfig)
  182. init_qat_net(qat_net)
  183. x = mge.tensor(np.random.normal(size=(1, 3, 3, 3)).astype("float32"))
  184. x = fake_quant_act(x, inp_scale)
  185. x.qparams.update(create_qparams(QuantMode.SYMMERTIC, "qint8", inp_scale))
  186. x_int8 = quant(x, inp_scale)
  187. weight = np.random.normal(size=(3, 3, 3, 3)).astype("float32")
  188. bias = np.random.normal(size=(1, 3, 1, 1)).astype("float32")
  189. if module in ("ConvBn2d", "ConvBnRelu2d"):
  190. normal_net.conv.weight[...] = fake_quant_weight(weight, weight_scale)
  191. normal_net.conv.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
  192. qat_net.conv.weight[...] = Parameter(weight)
  193. qat_net.conv.bias[...] = Parameter(bias)
  194. else:
  195. normal_net.weight[...] = fake_quant_weight(weight, weight_scale)
  196. normal_net.bias[...] = fake_quant_bias(bias, inp_scale * weight_scale)
  197. qat_net.weight[...] = Parameter(weight)
  198. qat_net.bias[...] = Parameter(bias)
  199. qat_from_float = getattr(QAT, module).from_float_module(normal_net)
  200. qat_from_float.eval()
  201. disable_observer(qat_from_float)
  202. disable_fake_quant(qat_from_float)
  203. q_net = getattr(Q, module).from_qat_module(qat_net)
  204. q_net.eval()
  205. normal = normal_net(x)
  206. qat_without_fakequant = qat_from_float(x)
  207. fake_quant_normal = fake_quant_act(normal_net(x), act_scale)
  208. qat = qat_net(x)
  209. q = q_net(x_int8).numpy() * act_scale
  210. np.testing.assert_allclose(qat_without_fakequant, normal, atol=1e-5)
  211. np.testing.assert_allclose(qat, fake_quant_normal, atol=act_scale)
  212. np.testing.assert_allclose(q, fake_quant_normal.numpy(), atol=act_scale)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台