
test_quantize.py

import numpy as np
import pytest

from megengine import Parameter, Tensor
from megengine import module as Float
from megengine.functional import ones, zeros
from megengine.module import (
    BatchNorm2d,
    Conv2d,
    ConvBn2d,
    ConvTranspose2d,
    ConvTransposeBn2d,
    ReLU,
)
from megengine.module import qat as QAT
from megengine.module import quantized as Q
from megengine.quantization import (
    min_max_fakequant_qconfig,
    passive_qconfig,
    tqt_qconfig,
)
from megengine.quantization.fake_quant import TQT, FakeQuantize
from megengine.quantization.observer import MinMaxObserver, PassiveObserver
from megengine.quantization.quantize import (
    _get_quantable_module_names,
    apply_easy_quant,
    disable_fake_quant,
    disable_observer,
    enable_fake_quant,
    enable_observer,
    propagate_qconfig,
    quantize,
    quantize_qat,
    reset_qconfig,
)
from megengine.utils.bn_fusion import fuse_conv_bn_relu_module


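# Fixture networks: a plain float model and a hand-built QAT counterpart.
# Both sandwich two Linear(3, 3) layers between a QuantStub and a DequantStub,
# giving the conversion helpers below both weight and activation quantization
# points to operate on.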
class FloatNet(Float.Module):
    def __init__(self):
        super().__init__()
        self.quant = Float.QuantStub()
        self.linear = Float.Sequential(Float.Linear(3, 3), Float.Linear(3, 3))
        self.dequant = Float.DequantStub()
        self.linear[0].bias[...] = Parameter(np.random.rand(3))
        self.linear[1].bias[...] = Parameter(np.random.rand(3))

    def forward(self, x):
        x = self.quant(x)
        x = self.linear(x)
        x = self.dequant(x)
        return x


class QATNet(Float.Module):
    def __init__(self):
        super().__init__()
        self.quant = QAT.QuantStub()
        self.linear = Float.Sequential(QAT.Linear(3, 3), QAT.Linear(3, 3))
        self.dequant = QAT.DequantStub()
        self.linear[0].bias[...] = Parameter(np.random.rand(3))
        self.linear[1].bias[...] = Parameter(np.random.rand(3))

    def forward(self, x):
        x = self.quant(x)
        x = self.linear(x)
        x = self.dequant(x)
        return x


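# propagate_qconfig should attach observers and fake-quant ops only where a
# module declares them: QuantStub quantizes activations only, Linear has both
# weight and activation slots, and DequantStub has neither.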
def test_propagate_qconfig():
    net = QATNet()
    propagate_qconfig(net, min_max_fakequant_qconfig)
    assert all(
        [
            net.quant.weight_observer is None,
            net.quant.weight_fake_quant is None,
            isinstance(net.quant.act_observer, MinMaxObserver),
            isinstance(net.quant.act_fake_quant, FakeQuantize),
            isinstance(net.linear[0].weight_observer, MinMaxObserver),
            isinstance(net.linear[0].weight_fake_quant, FakeQuantize),
            isinstance(net.linear[0].act_observer, MinMaxObserver),
            isinstance(net.linear[0].act_fake_quant, FakeQuantize),
            isinstance(net.linear[1].weight_observer, MinMaxObserver),
            isinstance(net.linear[1].weight_fake_quant, FakeQuantize),
            isinstance(net.linear[1].act_observer, MinMaxObserver),
            isinstance(net.linear[1].act_fake_quant, FakeQuantize),
            net.dequant.weight_observer is None,
            net.dequant.weight_fake_quant is None,
            net.dequant.act_observer is None,
            net.dequant.act_fake_quant is None,
        ]
    )


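# Helper: build a QAT net whose observers are seeded with fixed random
# min/max ranges, so the conversions below have concrete qparams to inherit.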
def init_qat_net():
    net = QATNet()
    propagate_qconfig(net, min_max_fakequant_qconfig)
    min_val = np.random.randint(-127, 0, size=(3,))
    max_val = np.random.randint(1, 127, size=(3,))
    net.quant.act_observer.min_val[...] = Parameter(min_val[0])
    net.quant.act_observer.max_val[...] = Parameter(max_val[0])
    net.linear[0].weight_observer.min_val[...] = Parameter(min_val[1])
    net.linear[0].weight_observer.max_val[...] = Parameter(max_val[1])
    net.linear[0].act_observer.min_val[...] = Parameter(min_val[2])
    net.linear[0].act_observer.max_val[...] = Parameter(max_val[2])
    net.linear[1].weight_observer.min_val[...] = Parameter(min_val[1])
    net.linear[1].weight_observer.max_val[...] = Parameter(max_val[1])
    net.linear[1].act_observer.min_val[...] = Parameter(min_val[2])
    net.linear[1].act_observer.max_val[...] = Parameter(max_val[2])
    return net


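# reset_qconfig swaps in a new qconfig (here the PassiveObserver-based
# passive_qconfig) and must carry over the qparams already observed.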
def test_reset_qconfig():
    qat_net = init_qat_net()
    new_qat_net = reset_qconfig(qat_net, passive_qconfig)
    assert (
        new_qat_net.linear[0].get_weight_qparams()
        == qat_net.linear[0].get_weight_qparams()
    )
    assert (
        new_qat_net.linear[0].get_activation_qparams()
        == qat_net.linear[0].get_activation_qparams()
    )
    assert (
        new_qat_net.linear[1].get_weight_qparams()
        == qat_net.linear[1].get_weight_qparams()
    )
    assert (
        new_qat_net.linear[1].get_activation_qparams()
        == qat_net.linear[1].get_activation_qparams()
    )


def test_enable_and_disable_observer():
    net = init_qat_net()
    enable_observer(net)
    assert net.quant.act_observer.enabled is True
    assert net.linear[0].weight_observer.enabled is True
    assert net.linear[0].act_observer.enabled is True
    assert net.linear[1].weight_observer.enabled is True
    assert net.linear[1].act_observer.enabled is True
    disable_observer(net)
    assert net.quant.act_observer.enabled is False
    assert net.linear[0].weight_observer.enabled is False
    assert net.linear[0].act_observer.enabled is False
    assert net.linear[1].weight_observer.enabled is False
    assert net.linear[1].act_observer.enabled is False


def test_enable_and_disable_fake_quant():
    net = init_qat_net()
    disable_fake_quant(net)
    assert net.quant.act_fake_quant.enabled is False
    assert net.linear[0].weight_fake_quant.enabled is False
    assert net.linear[0].act_fake_quant.enabled is False
    assert net.linear[1].weight_fake_quant.enabled is False
    assert net.linear[1].act_fake_quant.enabled is False
    enable_fake_quant(net)
    assert net.quant.act_fake_quant.enabled is True
    assert net.linear[0].weight_fake_quant.enabled is True
    assert net.linear[0].act_fake_quant.enabled is True
    assert net.linear[1].weight_fake_quant.enabled is True
    assert net.linear[1].act_fake_quant.enabled is True


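# One calibration pass: collect statistics with fake-quant off, then freeze
# the observers and switch fake-quant back on.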
def init_observer(module, data):
    enable_observer(module)
    disable_fake_quant(module)
    module(data)
    disable_observer(module)
    enable_fake_quant(module)


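# With fake-quant disabled the QAT net should reproduce the float output
# (y1 == y3); with it enabled the two quantized runs should agree (y2 == y4)
# and differ from the float result.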
def test_enable_and_disable_all():
    x = Tensor(np.random.randint(1, 10, size=(3, 3)).astype(np.float32))
    net = FloatNet()
    y1 = net(x).numpy()
    net = quantize_qat(net, min_max_fakequant_qconfig)
    init_observer(net, x)
    y2 = net(x).numpy()
    disable_fake_quant(net)
    y3 = net(x).numpy()
    enable_fake_quant(net)
    y4 = net(x).numpy()
    np.testing.assert_allclose(y1, y3)
    np.testing.assert_allclose(y2, y4)
    with pytest.raises(AssertionError):
        np.testing.assert_allclose(y2, y3)


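# quantize_qat converts every quantable float module to its QAT counterpart.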
def test_quantize_qat():
    net = FloatNet()
    qat_net = quantize_qat(net, inplace=False, qconfig=min_max_fakequant_qconfig)
    assert isinstance(qat_net.quant, QAT.QuantStub)
    assert isinstance(qat_net.linear[0], QAT.Linear)
    assert isinstance(qat_net.linear[1], QAT.Linear)
    assert isinstance(qat_net.dequant, QAT.DequantStub)


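# quantize then lowers the calibrated QAT modules to quantized inference modules.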
def test_quantize():
    qat_net = init_qat_net()
    q_net = quantize(qat_net, inplace=False)
    assert isinstance(q_net.quant, Q.QuantStub)
    assert isinstance(q_net.linear[0], Q.Linear)
    assert isinstance(q_net.linear[1], Q.Linear)
    assert isinstance(q_net.dequant, Q.DequantStub)


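# apply_easy_quant fine-tunes the observed scales on real data (searched here
# over [0.9, 1.1] in 10 steps) and leaves PassiveObservers holding the result;
# the DequantStub has no activation observer to tune.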
def test_apply_easy_quant():
    qat_net = init_qat_net()
    data = Tensor(np.random.rand(2, 3, 3, 3), dtype=np.float32)
    eq_net = reset_qconfig(qat_net, passive_qconfig, inplace=False)
    apply_easy_quant(eq_net, data, 0.9, 1.1, 10)
    assert isinstance(eq_net.quant.act_observer, PassiveObserver)
    assert isinstance(eq_net.linear[0].weight_observer, PassiveObserver)
    assert isinstance(eq_net.linear[0].act_observer, PassiveObserver)
    assert isinstance(eq_net.linear[1].weight_observer, PassiveObserver)
    assert isinstance(eq_net.linear[1].act_observer, PassiveObserver)
    assert eq_net.dequant.act_observer is None


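# tqt_qconfig replaces FakeQuantize with TQT, a trainable fake-quant op.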
def test_apply_tqt():
    qat_net = init_qat_net()
    tqt_net = reset_qconfig(qat_net, tqt_qconfig, inplace=False)
    assert isinstance(tqt_net.quant.act_fake_quant, TQT)
    assert isinstance(tqt_net.linear[0].weight_fake_quant, TQT)
    assert isinstance(tqt_net.linear[0].act_fake_quant, TQT)
    assert isinstance(tqt_net.linear[1].weight_fake_quant, TQT)
    assert isinstance(tqt_net.linear[1].act_fake_quant, TQT)
    assert tqt_net.dequant.act_fake_quant is None


def test_get_quantable_module_names():
    # need to make sure names from Quantized and QAT are the same
    def _get_qat_module_names():
        def is_qat(key: str):
            value = getattr(QAT, key)
            return (
                isinstance(value, type)
                and issubclass(value, QAT.QATModule)
                and value != QAT.QATModule
            )

        # source should have all quantable modules' names
        quantable_module_names = [key for key in dir(QAT) if is_qat(key)]
        return quantable_module_names

    qat_module_names = _get_qat_module_names()
    quantized_module_names = _get_quantable_module_names()
    assert set(qat_module_names) == set(quantized_module_names)

    for key in qat_module_names:
        value = getattr(Float, key)
        assert (
            isinstance(value, type)
            and issubclass(value, Float.Module)
            and value != Float.Module
        )


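# Modules marked with disable_quantize() must pass through quantize_qat
# unchanged, staying float.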
def test_disable_quantize():
    class Net(Float.Module):
        def __init__(self):
            super().__init__()
            self.conv = Float.ConvBnRelu2d(3, 3, 3)
            self.conv.disable_quantize()

        def forward(self, x):
            return self.conv(x)

    net = Net()
    qat_net = quantize_qat(net, inplace=False)
    assert isinstance(qat_net.conv, Float.ConvBnRelu2d)
    assert isinstance(qat_net.conv.conv, Float.Conv2d)


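# quantize_qat accepts a custom float-to-QAT mapping for user-defined modules
# that implement from_float_module.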
def test_convert_with_custom_mapping():
    class FloatExample(Float.Module):
        def forward(self, x):
            return x

    class QATExample(QAT.QATModule):
        def forward(self, x):
            return x

        @classmethod
        def from_float_module(cls, float_module):
            return cls()

    class Net(Float.Module):
        def __init__(self):
            super().__init__()
            self.example = FloatExample()

        def forward(self, x):
            return self.example(x)

    net = Net()
    qat_net = quantize_qat(net, inplace=False, mapping={FloatExample: QATExample})
    assert isinstance(qat_net.example, QATExample)


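# fuse_conv_bn_relu_module folds BatchNorm statistics into the conv weight and
# bias; the fused module is checked against the unfused pipeline across
# eval/train combinations of the source modules.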
def test_ConvBn2d_fold_weight_bias():
    in_channels = 32
    out_channels = 64
    kernel_size = 3
    conv = Conv2d(in_channels, out_channels, kernel_size)
    bn = BatchNorm2d(out_channels)
    relu = ReLU()

    fused_conv = fuse_conv_bn_relu_module(conv, bn, relu)
    bn.eval()
    fused_conv.eval()
    inputs = Tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
    expected_result = relu(bn(conv(inputs)))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )

    conv.eval()
    bn.eval()
    relu.eval()
    fused_conv = fuse_conv_bn_relu_module(conv, bn, relu)
    fused_conv.eval()
    expected_result = relu(conv(inputs))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )

    conv.train()
    bn.train()
    fused_conv = fuse_conv_bn_relu_module(conv, bn, None)
    fused_conv.train()
    expected_result = bn(conv(inputs))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )


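# Same folding checks as above, for the ConvTranspose2d + BatchNorm2d pair.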
def test_ConvTransposeBn2d_fold_weight_bias():
    in_channels = 32
    out_channels = 64
    kernel_size = 3
    conv = ConvTranspose2d(in_channels, out_channels, kernel_size)
    bn = BatchNorm2d(out_channels)
    relu = ReLU()

    fused_conv = fuse_conv_bn_relu_module(conv, bn, relu)
    bn.eval()
    fused_conv.eval()
    inputs = Tensor(np.random.randn(4, in_channels, 32, 32).astype(np.float32))
    expected_result = relu(bn(conv(inputs)))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )

    conv.eval()
    bn.eval()
    relu.eval()
    fused_conv = fuse_conv_bn_relu_module(conv, bn, relu)
    fused_conv.eval()
    expected_result = relu(conv(inputs))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )

    conv.train()
    bn.train()
    fused_conv = fuse_conv_bn_relu_module(conv, bn, None)
    fused_conv.train()
    expected_result = bn(conv(inputs))
    actual_result = fused_conv(inputs)
    np.testing.assert_allclose(
        expected_result.numpy(), actual_result.numpy(), atol=1e-4
    )