You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

quantized.py 7.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190
  1. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  2. #
  3. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  4. #
  5. # Unless required by applicable law or agreed to in writing,
  6. # software distributed under the License is distributed on an
  7. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8. # pylint: disable=too-many-lines
  9. from typing import Tuple, Union
  10. from ..core._imperative_rt.core2 import apply
  11. from ..core.ops import builtin
  12. from ..tensor import Tensor
  13. from ..utils.tuple_function import _pair, _pair_nonzero
  14. from .debug_param import get_execution_strategy
  15. def conv_bias_activation(
  16. inp: Tensor,
  17. weight: Tensor,
  18. bias: Tensor,
  19. dtype=None,
  20. stride: Union[int, Tuple[int, int]] = 1,
  21. padding: Union[int, Tuple[int, int]] = 0,
  22. dilation: Union[int, Tuple[int, int]] = 1,
  23. groups: int = 1,
  24. nonlinear_mode="identity",
  25. conv_mode="cross_correlation",
  26. compute_mode="default",
  27. ) -> Tensor:
  28. """
  29. Convolution bias with activation operation, only for inference.
  30. :param inp: feature map of the convolution operation.
  31. :param weight: convolution kernel.
  32. :param bias: bias added to the result of convolution
  33. :param stride: stride of the 2D convolution operation. Default: 1
  34. :param padding: size of the paddings added to the input on both sides
  35. of its spatial dimensions. Only zero-padding is supported. Default: 0
  36. :param dilation: dilation of the 2D convolution operation. Default: 1
  37. :param groups: number of groups into which the input and output channels are divided,
  38. so as to perform a "grouped convolution". When ``groups`` is not 1,
  39. ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
  40. and the shape of weight should be `(groups, out_channel // groups,
  41. in_channels // groups, height, width)`.
  42. :type conv_mode: string or :class:`Convolution.Mode`.
  43. :param conv_mode: supports 'cross_correlation' or 'convolution'. Default:
  44. 'cross_correlation'
  45. :param dtype: support for ``np.dtype``, Default: np.int8
  46. :type compute_mode: string or
  47. :class:`Convolution.ComputeMode`.
  48. :param compute_mode: when set to "default", no special requirements will be
  49. placed on the precision of intermediate results. When set to "float32",
  50. "float32" would be used for accumulator and intermediate result,
  51. but only effective when input and output are of float16 dtype.
  52. """
  53. ph, pw = _pair(padding)
  54. sh, sw = _pair_nonzero(stride)
  55. dh, dw = _pair_nonzero(dilation)
  56. sparse_type = "dense" if groups == 1 else "group"
  57. op = builtin.ConvBias(
  58. stride_h=sh,
  59. stride_w=sw,
  60. pad_h=ph,
  61. pad_w=pw,
  62. dilate_h=dh,
  63. dilate_w=dw,
  64. dtype=dtype,
  65. format="NCHW",
  66. strategy=get_execution_strategy(),
  67. nonlineMode=nonlinear_mode,
  68. mode=conv_mode,
  69. compute_mode=compute_mode,
  70. sparse=sparse_type,
  71. )
  72. (outputs,) = apply(op, inp, weight, bias)
  73. return outputs
  74. def batch_conv_bias_activation(
  75. inp: Tensor,
  76. weight: Tensor,
  77. bias: Tensor,
  78. dtype=None,
  79. stride: Union[int, Tuple[int, int]] = 1,
  80. padding: Union[int, Tuple[int, int]] = 0,
  81. dilation: Union[int, Tuple[int, int]] = 1,
  82. groups: int = 1,
  83. nonlinear_mode="identity",
  84. conv_mode="cross_correlation",
  85. compute_mode="default",
  86. ) -> Tensor:
  87. """
  88. Batch convolution bias with activation operation, only for inference.
  89. :param inp: feature map of the convolution operation.
  90. :param weight: convolution kernel in batched way.
  91. :param bias: bias added to the result of convolution
  92. :param stride: stride of the 2D convolution operation. Default: 1
  93. :param padding: size of the paddings added to the input on both sides
  94. of its spatial dimensions. Only zero-padding is supported. Default: 0
  95. :param dilation: dilation of the 2D convolution operation. Default: 1
  96. :param groups: number of groups into which the input and output channels are divided,
  97. so as to perform a "grouped convolution". When ``groups`` is not 1,
  98. ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
  99. and the shape of weight should be `(groups, out_channel // groups,
  100. in_channels // groups, height, width)`.
  101. :type conv_mode: string or :class:`Convolution.Mode`.
  102. :param conv_mode: supports 'cross_correlation' or 'convolution'. Default:
  103. 'cross_correlation'
  104. :param dtype: support for ``np.dtype``, Default: np.int8
  105. :type compute_mode: string or
  106. :class:`Convolution.ComputeMode`.
  107. :param compute_mode: when set to "default", no special requirements will be
  108. placed on the precision of intermediate results. When set to "float32",
  109. "float32" would be used for accumulator and intermediate result,
  110. but only effective when input and output are of float16 dtype.
  111. """
  112. ph, pw = _pair(padding)
  113. sh, sw = _pair_nonzero(stride)
  114. dh, dw = _pair_nonzero(dilation)
  115. sparse_type = "dense" if groups == 1 else "group"
  116. op = builtin.BatchConvBias(
  117. stride_h=sh,
  118. stride_w=sw,
  119. pad_h=ph,
  120. pad_w=pw,
  121. dilate_h=dh,
  122. dilate_w=dw,
  123. dtype=dtype,
  124. format="NCHW",
  125. strategy=get_execution_strategy(),
  126. nonlineMode=nonlinear_mode,
  127. mode=conv_mode,
  128. compute_mode=compute_mode,
  129. sparse=sparse_type,
  130. )
  131. (outputs,) = apply(op, inp, weight, bias)
  132. return outputs
  133. def conv_transpose2d(
  134. inp: Tensor,
  135. weight: Tensor,
  136. bias: Tensor = None,
  137. dtype=None,
  138. stride: Union[int, Tuple[int, int]] = 1,
  139. padding: Union[int, Tuple[int, int]] = 0,
  140. dilation: Union[int, Tuple[int, int]] = 1,
  141. groups: int = 1,
  142. conv_mode="cross_correlation",
  143. compute_mode="default",
  144. ) -> Tensor:
  145. assert (
  146. conv_mode.lower() == "cross_correlation"
  147. or conv_mode.name == "CROSS_CORRELATION"
  148. )
  149. assert compute_mode.lower() == "default" or compute_mode.name == "DEFAULT"
  150. if groups != 1:
  151. raise NotImplementedError(
  152. "group quantized transposed conv2d is not supported yet."
  153. )
  154. if bias is not None:
  155. raise NotImplementedError(
  156. "bias of quantized transposed conv2d is not supported yet."
  157. )
  158. pad_h, pad_w = _pair(padding)
  159. stride_h, stride_w = _pair_nonzero(stride)
  160. dilate_h, dilate_w = _pair_nonzero(dilation)
  161. # should be replaced by Op with bias such as ConvolutionBackwardDataBias
  162. op = builtin.ConvolutionBackwardData(
  163. stride_h=stride_h,
  164. stride_w=stride_w,
  165. pad_h=pad_h,
  166. pad_w=pad_w,
  167. dilate_h=dilate_h,
  168. dilate_w=dilate_w,
  169. strategy=get_execution_strategy(),
  170. dtype=dtype,
  171. compute_mode=compute_mode,
  172. mode=conv_mode,
  173. )
  174. (output,) = apply(op, weight, inp)
  175. return output

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台