
test_fake_quant.py 4.8 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest

import megengine as mge
import megengine.functional as F  # needed below for F.round / F.minimum / F.ones_like
from megengine import tensor
from megengine.core.autodiff.grad import Function, Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.quantization.internal_fake_quant import *
from megengine.quantization.utils import QuantMode, fake_quant_tensor, tqt_forward

class TQT_numpy:
    def __init__(self, lowerbound, upperbound):
        super().__init__()
        self.lowerbound = lowerbound
        self.upperbound = upperbound

    def forward(self, inp, scale):
        t = 2 ** scale
        # t = F.maximum(t, 1e-4)
        inp_scaled = inp / t
        inp_clipped = np.maximum(
            np.minimum(inp_scaled, self.upperbound), self.lowerbound
        )
        inp_rounded = np.round(inp_clipped)
        inp_flq = inp_rounded * t
        self.saved_tensors = (inp_scaled, inp_rounded, t)
        return inp_flq
    def backward(self, grad_inp_flq):
        (inp_scaled, inp_rounded, t) = self.saved_tensors
        mask_clip = (inp_scaled < -0.5 + self.lowerbound) + (
            inp_scaled > self.upperbound + 0.5
        )  # mask for accumulating the gradients of |data_scaled| > L
        mask_quant = np.abs(
            mask_clip - 1
        )  # mask for accumulating the gradients with |data_scaled| <= L
        grad_quant = (
            grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
        )  # gradient within |data_scaled| <= L
        grad_clip = (
            grad_inp_flq * mask_clip * inp_rounded
        )  # gradient with |data_scaled| > L
        grad_s = grad_clip.sum() + grad_quant.sum()
        # dL/ds = dL/dt * t * ln(2)
        grad_s = grad_s * t * np.log(2)
        grad_inp = grad_inp_flq * mask_quant
        return grad_inp, grad_s
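
# Worked example of the straight-through scale gradient above: for one element
# with s = 1 (so t = 2) and inp = 3.0, forward gives inp_scaled = 1.5 and
# inp_rounded = 2.0, so an incoming gradient of 1 yields
# grad_s = (2.0 - 1.5) * t * ln(2) = 0.5 * 2 * ln(2) ≈ 0.693; a clipped
# element instead passes zero gradient to the input and contributes
# inp_rounded * t * ln(2) to the scale gradient.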

def test_tqt():
    g = []

    def cb(grad):
        g.append(grad)

    x = np.random.normal(size=(1, 2, 3, 4))
    s = np.random.rand(1) + 1
    g_y = np.ones(shape=(1, 2, 3, 4), dtype="float32")

    n = TQT_numpy(-127, 127)
    y_np = n.forward(x, s)
    g_x_np, g_s_np = n.backward(g_y)

    x = mge.tensor(x, dtype="float32")
    s = mge.tensor(s, dtype="float32")
    g_y = mge.tensor(g_y, dtype="float32")

    grad = Grad().wrt(x, s, callback=cb)
    y = tqt_forward(-127, 127, x, s)
    grad(y, g_y)
    g_x, g_s = g

    np.testing.assert_allclose(y.numpy(), y_np, atol=1e-6)
    np.testing.assert_allclose(g_x.numpy(), g_x_np, atol=1e-6)
    np.testing.assert_allclose(g_s.numpy(), g_s_np, atol=1e-6)
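
# Helper: returns a callback that stashes the incoming gradient on `self`
# (here a tensor, not a class instance) under the given attribute name, so
# the tests below can read it back as x.grad.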
def _save_to(self, name="grad"):
    def callback(grad):
        setattr(self, name, grad)

    return callback


class Round(Function):
    def forward(self, x):
        return F.round(x)

    def backward(self, output_grads):
        # straight-through estimator: pass gradients through the rounding op
        return output_grads


def fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax):
    oup = Round()(inp / scale) + zero_point
    oup = F.minimum(F.maximum(oup, qmin), qmax)
    oup = (oup - zero_point) * scale
    return oup
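
# Worked example of the reference round trip above, using the values from
# test_fakequant: with scale = 4, zero_point = 1, qmin = -126, qmax = 129, an
# input of 9.0 quantizes to round(9 / 4) + 1 = 3 (inside [qmin, qmax]) and
# dequantizes back to (3 - 1) * 4 = 8.0.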

def test_fakequant():
    qmin = -126
    qmax = 129

    def run(zero_point, scale):
        q_dict = {}
        q_dict["mode"] = QuantMode.ASYMMERTIC
        q_dict["scale"] = scale
        q_dict["zero_point"] = zero_point
        inp_data = np.random.uniform(low=-512.0, high=512.0, size=(1, 32, 32, 32))
        inp = tensor(inp_data, dtype=np.float32)
        # test forward
        oup = fake_quant_tensor(inp, qmin, qmax, q_dict).numpy()
        oup_gt = fake_quant_tensor_gt(inp, scale, zero_point, qmin, qmax).numpy()
        assert np.allclose(oup, oup_gt)
        assert oup.shape == oup_gt.shape

        # test backward
        x = tensor(inp_data, dtype=np.float32)
        grad = Grad().wrt(x, callback=_save_to(x))
        y = fake_quant_tensor(x, qmin, qmax, q_dict)
        grad(y, tensor(F.ones_like(x)))

        x1 = tensor(inp_data, dtype=np.float32)
        grad = Grad().wrt(x1, callback=_save_to(x1))
        y1 = fake_quant_tensor_gt(x1, scale, zero_point, qmin, qmax)
        grad(y1, tensor(F.ones_like(x1)))

        assert np.allclose(x.grad.numpy(), x1.grad.numpy())
        assert make_shape_tuple(x.grad.shape) == make_shape_tuple(x1.grad.shape)

    # per-tensor scale / zero_point
    zero_point = tensor([1.0], dtype=np.float32)
    scale = tensor([4.0], dtype=np.float32)
    run(zero_point, scale)

    # per-channel scale / zero_point, broadcast over the channel axis
    zero_point = tensor(1.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
    scale = tensor(4.0 * np.ones((1, 32, 1, 1)), dtype=np.float32)
    run(zero_point, scale)
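
For reference, a minimal NumPy-only sketch of the fake-quantization forward
pass that TQT_numpy implements and the tests above exercise; the input values
and scale below are illustrative, not taken from the test file.

import numpy as np

def tqt_forward_np(x, s, lo=-127.0, hi=127.0):
    # TQT-style fake quantization: scale by t = 2**s, clip, round, rescale
    t = 2.0 ** s
    return np.round(np.clip(x / t, lo, hi)) * t

x = np.array([0.4, 3.0, 500.0])
print(tqt_forward_np(x, 1.0))  # [  0.   4. 254.]

In-range values snap to the nearest multiple of the step t = 2, while 500.0
is clipped at the bound (127 * 2 = 254) before rescaling.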
