
conv.py 3.5 kB

# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ... import functional as F
from .. import conv as Float
from .module import QATModule

class Conv2d(Float.Conv2d, QATModule):
    r"""A :class:`~.QATModule` :class:`~.module.Conv2d` with QAT support.
    Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
    """

    def calc_conv_qat(self, inp):
        # Fake-quantize the weight, derive the bias quantization from the
        # input and quantized weight, then run the underlying float conv.
        w_qat = self.apply_quant_weight(self.weight)
        b_qat = self.apply_quant_bias(self.bias, inp, w_qat)
        conv = self.calc_conv(inp, w_qat, b_qat)
        return conv

    @classmethod
    def from_float_module(cls, float_module: Float.Conv2d):
        r"""
        Return a :class:`~.QATModule` instance converted from
        a float :class:`~.Module` instance.
        """
        qat_module = cls(
            float_module.in_channels,
            float_module.out_channels,
            float_module.kernel_size,
            float_module.stride,
            float_module.padding,
            float_module.dilation,
            float_module.groups,
            float_module.bias is not None,
            float_module.conv_mode,
            float_module.compute_mode,
            float_module.padding_mode,
            name=float_module.name,
        )
        # Share (not copy) the float module's parameters.
        qat_module.weight = float_module.weight
        qat_module.bias = float_module.bias
        return qat_module

    def forward(self, inp):
        return self.apply_quant_activation(self.calc_conv_qat(inp))

class ConvRelu2d(Conv2d):
    r"""A :class:`~.QATModule` combining :class:`~.module.Conv2d` and :func:`~.relu` with QAT support.
    Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
    """

    def forward(self, inp):
        # Apply ReLU before the activation fake-quantization so the fused
        # conv + relu shares a single output quantization point.
        return self.apply_quant_activation(F.relu(self.calc_conv_qat(inp)))

class ConvTranspose2d(Float.ConvTranspose2d, QATModule):
    r"""A :class:`~.QATModule` :class:`~.module.ConvTranspose2d` with QAT support.
    Could be applied with :class:`~.Observer` and :class:`~.quantization.fake_quant.FakeQuantize`.
    """

    def calc_conv_transpose2d_qat(self, inp):
        # Same pattern as Conv2d: fake-quantize the weight, derive the bias
        # quantization, then run the underlying float transposed convolution.
        w_qat = self.apply_quant_weight(self.weight)
        b_qat = self.apply_quant_bias(self.bias, inp, w_qat)
        conv = self.calc_conv_transpose2d(inp, w_qat, b_qat)
        return conv

    @classmethod
    def from_float_module(cls, float_module: Float.ConvTranspose2d):
        r"""
        Return a :class:`~.QATModule` instance converted from
        a float :class:`~.Module` instance.
        """
        qat_module = cls(
            float_module.in_channels,
            float_module.out_channels,
            float_module.kernel_size,
            float_module.stride,
            float_module.padding,
            float_module.dilation,
            float_module.groups,
            float_module.bias is not None,
            float_module.conv_mode,
            float_module.compute_mode,
            name=float_module.name,
        )
        # Share (not copy) the float module's parameters.
        qat_module.weight = float_module.weight
        qat_module.bias = float_module.bias
        return qat_module

    def forward(self, inp):
        return self.apply_quant_activation(self.calc_conv_transpose2d_qat(inp))
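
For reference (not part of conv.py), a minimal usage sketch. It assumes this file lives at megengine/module/qat/conv.py so the classes are importable from megengine.module.qat, and that no quantization config has been attached yet, in which case the apply_quant_* hooks act as pass-throughs and the QAT module should behave like the float convolution.

# Minimal sketch: convert a float Conv2d to its QAT counterpart.
# The import path for the QAT class is an assumption based on this
# file's location in the package.
import numpy as np

import megengine as mge
import megengine.module as M
from megengine.module.qat import Conv2d as QATConv2d

float_conv = M.Conv2d(3, 16, kernel_size=3, stride=1, padding=1)

# from_float_module shares the float module's weight and bias
# rather than copying them (see the assignments in the source above).
qat_conv = QATConv2d.from_float_module(float_conv)

# Without observers / fake-quant attached (e.g. via a whole-network
# quantize_qat pass), this should match the float conv's output.
x = mge.tensor(np.random.randn(1, 3, 32, 32).astype("float32"))
y = qat_conv(x)
print(y.shape)  # (1, 16, 32, 32)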