
module.py 6.0 kB

# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from abc import abstractmethod

# avoid circular reference
from ...quantization.fake_quant import FakeQuantize
from ...quantization.observer import Observer
from ...quantization.qconfig import QConfig
from ...quantization.utils import fake_quant_bias
from ...tensor import Tensor
from ..module import Module


class QATModule(Module):
    r"""Base class of quantized-float related :class:`~.Module`, basically for QAT and Calibration.

    Use :meth:`from_float_module` to generate an instance from a float :class:`~.Module`.
    Or use :func:`~.quantize.quantize_qat` to do it recursively and automatically.

    Can also be converted to :class:`~.QuantizedModule` for deployment using
    :func:`~.quantize.quantize` further.
    """

    with_weight = True
    with_act = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.weight_observer = None  # type: Observer
        self.act_observer = None  # type: Observer

        self.weight_fake_quant = None  # type: FakeQuantize
        self.act_fake_quant = None  # type: FakeQuantize

    def __repr__(self):
        return "QAT." + super().__repr__()

    def set_qconfig(self, qconfig: QConfig):
        r"""Set quantization related configs with ``qconfig``, including
        observer and fake_quant for weight and activation.
        """

        def safe_call(func):
            return func() if func is not None else None

        if self.with_act:
            self.act_observer = safe_call(qconfig.act_observer)
            self.act_fake_quant = safe_call(qconfig.act_fake_quant)
        if self.with_weight:
            self.weight_observer = safe_call(qconfig.weight_observer)
            self.weight_fake_quant = safe_call(qconfig.weight_fake_quant)

    def _enable_exec(self, with_module, func, enable):
        if not with_module or not func:
            return
        if enable:
            func.enable()
        else:
            func.disable()

    def set_fake_quant(self, enable):
        self._enable_exec(self.with_act, self.act_fake_quant, enable)
        self._enable_exec(self.with_weight, self.weight_fake_quant, enable)

    def set_observer(self, enable):
        self._enable_exec(self.with_act, self.act_observer, enable)
        self._enable_exec(self.with_weight, self.weight_observer, enable)

    def _apply_fakequant_with_observer(
        self, target: Tensor, fake_quant: FakeQuantize, observer: Observer
    ):
        # do observer
        if observer is None:
            oup = target
            qparams = None
        else:
            oup = observer(target)
            qparams = observer.get_qparams()
        # do fake quant
        if fake_quant is not None:
            oup = fake_quant(oup, qparams)
            # use qparams of fake_quant if available.
            if hasattr(fake_quant, "get_qparams"):
                qparams = fake_quant.get_qparams()
        # set to tensor qparams.
        if qparams is not None:
            oup.qparams.update(qparams)
        return oup

    def apply_quant_weight(self, target: Tensor):
        r"""Apply weight's observer and fake_quant from ``qconfig`` on ``target``."""
        return self._apply_fakequant_with_observer(
            target, self.weight_fake_quant, self.weight_observer
        )

    def apply_quant_activation(self, target: Tensor):
        r"""Apply activation's observer and fake_quant from ``qconfig`` on ``target``."""
        return self._apply_fakequant_with_observer(
            target, self.act_fake_quant, self.act_observer
        )

    def apply_quant_bias(self, target: Tensor, inp: Tensor, w_qat: Tensor):
        r"""Use :func:`~.fake_quant_bias` to process ``target``. Only valid when
        ``act_fake_quant`` and ``weight_fake_quant`` are both enabled.
        """
        # bias should have the same dtype as activation, so act_fake_quant can also
        # decide whether to do bias fakequant
        if (
            self.act_fake_quant
            and self.act_fake_quant.enabled
            and self.weight_fake_quant
            and self.weight_fake_quant.enabled
        ):
            b_qat = fake_quant_bias(target, inp, w_qat)
        else:
            b_qat = target
        return b_qat

    def _get_method_result(
        self, method: str, fake_quant: FakeQuantize, observer: Observer
    ):
        if hasattr(fake_quant, method):
            return getattr(fake_quant, method)()
        elif hasattr(observer, method):
            return getattr(observer, method)()
        return None

    def get_weight_dtype(self):
        r"""Get weight's quantization dtype via the method defined in ``qconfig``."""
        return self._get_method_result(
            "get_quantized_dtype", self.weight_fake_quant, self.weight_observer
        )

    def get_activation_dtype(self):
        r"""Get activation's quantization dtype via the method defined in ``qconfig``."""
        return self._get_method_result(
            "get_quantized_dtype", self.act_fake_quant, self.act_observer
        )

    def get_weight_qparams(self):
        r"""Get weight's quantization parameters."""
        return self._get_method_result(
            "get_qparams", self.weight_fake_quant, self.weight_observer
        )

    def get_activation_qparams(self):
        r"""Get activation's quantization parameters."""
        return self._get_method_result(
            "get_qparams", self.act_fake_quant, self.act_observer
        )

    @classmethod
    @abstractmethod
    def from_float_module(cls, float_module: Module):
        r"""Return a :class:`~.QATModule` instance converted from
        a float :class:`~.Module` instance.
        """
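
The class docstring above describes the intended workflow: convert a float network into QAT modules, let the observers and fake_quant objects run during training or calibration, then convert again for deployment. The following is a minimal sketch of that workflow; it assumes the public helpers ``quantize_qat``, ``quantize`` and the predefined ``ema_fake_quant_qconfig`` exported by ``megengine.quantization``, so names may differ slightly across MegEngine versions.

    # Sketch only: assumes quantize_qat / quantize / ema_fake_quant_qconfig
    # are available from megengine.quantization in your installed version.
    import numpy as np

    import megengine as mge
    import megengine.module as M
    from megengine.quantization import ema_fake_quant_qconfig, quantize, quantize_qat


    class Net(M.Module):
        def __init__(self):
            super().__init__()
            self.conv = M.ConvBn2d(3, 8, 3, padding=1)

        def forward(self, x):
            return self.conv(x)


    net = Net()
    # Recursively replace float modules with their QATModule counterparts and
    # attach observers / fake_quant objects according to the qconfig
    # (internally this goes through set_qconfig shown above).
    qat_net = quantize_qat(net, qconfig=ema_fake_quant_qconfig)

    # Running data through the QAT network lets the observers collect statistics
    # while fake_quant simulates quantization error during training/calibration.
    x = mge.tensor(np.random.randn(1, 3, 32, 32).astype("float32"))
    _ = qat_net(x)

    # Convert the QAT network to a QuantizedModule for deployment.
    q_net = quantize(qat_net)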

The MegEngine package bundles the CUDA environment needed to run code on the GPU, so there is no separate CPU or GPU build. If you want to run GPU programs, make sure the machine has a GPU device and that the driver is installed. If you would like to try deep-learning development on a cloud GPU compute platform, you are welcome to visit the MegStudio platform.
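
A quick way to confirm that the bundled CUDA runtime can actually see a GPU is shown below; this is a sketch that assumes ``megengine.is_cuda_available``, ``set_default_device`` and ``get_default_device`` from MegEngine's public API.

    # Sanity check: does this MegEngine installation see a usable GPU?
    import megengine as mge

    if mge.is_cuda_available():
        mge.set_default_device("gpu0")  # place subsequent tensors/ops on the first GPU
        print("running on GPU:", mge.get_default_device())
    else:
        print("no usable GPU found, falling back to CPU")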