You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

fake_quant.py 5.6 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. import math
  9. from typing import Iterable
  10. import numpy as np
  11. from .. import functional as F
  12. from ..core.autodiff.grad import Function
  13. from ..core.tensor.dtype import _metadata_dict, get_quantized_dtype
  14. from ..module import Module
  15. from ..tensor import Parameter, Tensor
  16. from .utils import QuantMode, fake_quant_tensor, get_qparam_dict
  17. class _FakeQuantize(Module):
  18. r"""
  19. A Basic Fake Quant module.
  20. :param dtype: a string indicating the target quantization type of input.
  21. :param narrow_range: whether the absolute value of ``qmin`` is the same as ``qmax``,
  22. instead of 1 greater. Usually True for weight and False for activation.
  23. :param enable: whether do ``normal_forward`` or ``fake_quant_forward``.
  24. """
  25. def __init__(
  26. self, dtype: str, narrow_range: bool = False, enable: bool = True, **kwargs
  27. ):
  28. super().__init__()
  29. if not dtype in _metadata_dict.keys():
  30. raise ValueError(
  31. "unknown dtype: {}, only support {}".format(
  32. dtype, _metadata_dict.keys()
  33. )
  34. )
  35. self.dtype = dtype
  36. self.narrow_range = narrow_range
  37. self.qmin = (
  38. -_metadata_dict[dtype].qmax if narrow_range else _metadata_dict[dtype].qmin
  39. )
  40. self.qmax = _metadata_dict[dtype].qmax
  41. self.enabled = enable
  42. def enable(self):
  43. self.enabled = True
  44. def disable(self):
  45. self.enabled = False
  46. def fake_quant_forward(self, inp, q_dict=None):
  47. return inp
  48. def normal_foward(self, inp, q_dict=None):
  49. return inp
  50. def forward(self, inp, q_dict=None):
  51. if self.enabled:
  52. return self.fake_quant_forward(inp, q_dict=q_dict)
  53. else:
  54. return self.normal_foward(inp, q_dict=q_dict)
  55. class TQT_Function(Function):
  56. def __init__(self, lowerbound, upperbound):
  57. super().__init__()
  58. self.lowerbound = lowerbound
  59. self.upperbound = upperbound
  60. self.saved_tensors = ()
  61. def save_for_backward(self, *tensors: Iterable[Tensor]):
  62. """
  63. Saves tensors needed for gradient computation. This method should be called only
  64. once in :meth:`~.function.Function.forward`, additional calls will replace values saved previously.
  65. The saved tensors can be accessed through the ``saved_tensors`` attribute.
  66. """
  67. self.saved_tensors = tensors
  68. def forward(self, inp, scale):
  69. t = 2 ** scale
  70. # t = F.maximum(t, 1e-4)
  71. inp_scaled = inp / t
  72. inp_clipped = F.maximum(F.minimum(inp_scaled, self.upperbound), self.lowerbound)
  73. inp_rounded = F.round(inp_clipped)
  74. inp_flq = inp_rounded * t
  75. self.save_for_backward(inp_scaled, inp_rounded, t)
  76. return inp_flq
  77. def backward(self, grad_inp_flq):
  78. (inp_scaled, inp_rounded, t) = self.saved_tensors
  79. mask_clip = F.logical_and(
  80. inp_scaled < -0.5 + self.lowerbound, inp_scaled > self.upperbound + 0.5
  81. ) # mask for accumulating the gradients of |data_scaled|>L
  82. mask_quant = F.logical_not(mask_clip)
  83. grad_quant = (
  84. grad_inp_flq * mask_quant * (inp_rounded - inp_scaled)
  85. ) # gradient within |data_scaled|<=L
  86. grad_clip = (
  87. grad_inp_flq * mask_clip * inp_rounded
  88. ) # gradient with | data_scaled|>L
  89. grad_s = grad_clip.sum() + grad_quant.sum()
  90. # dL/ds = dL/dt * t * ln(2)
  91. grad_s = grad_s * t * math.log(2)
  92. grad_inp = grad_inp_flq * mask_quant
  93. return grad_inp, grad_s
  94. class TQT(_FakeQuantize):
  95. r"""
  96. TQT: https://arxiv.org/abs/1903.08066 Trained Quantization Thresholds
  97. for Accurate and Efficient Fixed-Point Inference of Deep Neural Networks.
  98. """
  99. def __init__(
  100. self,
  101. q_dict,
  102. dtype: str,
  103. narrow_range: bool = False,
  104. enable: bool = True,
  105. **kwargs
  106. ):
  107. super().__init__(dtype, narrow_range, enable, **kwargs)
  108. assert (
  109. q_dict["mode"] == QuantMode.SYMMERTIC
  110. ), "only symmetric quantization is supported by TQT"
  111. if "scale" not in q_dict or q_dict["scale"] is None:
  112. raise AssertionError("Can not get an initialized scale")
  113. self.scale = F.log(q_dict["scale"]) / math.log(2)
  114. def fake_quant_forward(self, inp, q_dict=None):
  115. # when enable, TQT will do fakequant forward, finetune the scale
  116. return TQT_Function(self.qmin, self.qmax)(inp, self.scale)
  117. def get_qparams(self):
  118. q_dict = get_qparam_dict(QuantMode.SYMMERTIC)
  119. q_dict["scale"] = 2 ** self.scale
  120. return q_dict
  121. def get_dtype(self):
  122. q_dict = self.get_qparams()
  123. scale = None if "scale" not in q_dict else q_dict["scale"].numpy()[0]
  124. zero_point = (
  125. None if "zero_point" not in q_dict else q_dict["zero_point"].numpy()[0]
  126. )
  127. return get_quantized_dtype(self.dtype, scale, zero_point)
  128. class FakeQuantize(_FakeQuantize):
  129. r"""
  130. A module to do quant and dequant according to observer's scale and zero_point.
  131. """
  132. def fake_quant_forward(self, inp, q_dict=None):
  133. return fake_quant_tensor(inp, self.qmin, self.qmax, q_dict)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台