
conv.py

# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from abc import abstractmethod
from typing import Tuple, Union

import numpy as np

from ..functional import (
    conv1d,
    conv2d,
    conv3d,
    conv_transpose2d,
    conv_transpose3d,
    deformable_conv2d,
    local_conv2d,
    relu,
)
from ..tensor import Parameter
from ..utils.tuple_function import _pair, _pair_nonzero, _triple, _triple_nonzero
from . import init
from .module import Module


class _ConvNd(Module):
    """Base class for convolution modules, including transposed convolution."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int]],
        dilation: Union[int, Tuple[int, int]],
        groups: int,
        bias: bool = True,
        **kwargs
    ):
        super().__init__(**kwargs)
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups

        self.weight = Parameter(np.zeros(self._infer_weight_shape(), dtype=np.float32))
        self.bias = None
        if bias:
            self.bias = Parameter(np.zeros(self._infer_bias_shape(), dtype=np.float32))
        self.reset_parameters()

    @abstractmethod
    def _get_fanin(self):
        pass

    def reset_parameters(self) -> None:
        fanin = self._get_fanin()
        std = np.sqrt(1 / fanin)
        init.normal_(self.weight, 0.0, std)
        if self.bias is not None:
            init.zeros_(self.bias)

    @abstractmethod
    def _infer_weight_shape(self):
        pass

    @abstractmethod
    def _infer_bias_shape(self):
        pass

    def _module_info_string(self):
        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}"
        if self.stride != (1,) * len(self.stride):
            s += ", stride={stride}"
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias is None:
            s += ", bias=False"
        return s.format(**self.__dict__)


class Conv1d(_ConvNd):
    r"""
    Applies a 1D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, H)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, H_{\text{out}})` through the
    process described below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 1D cross-correlation operator,
    :math:`N` is batch size, :math:`C` denotes the number of channels, and
    :math:`H` is the length of the 1D data element.

    When ``groups == in_channels`` and ``out_channels == K * in_channels``,
    where `K` is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, H_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions.
    :param stride: stride of the 1D convolution operation.
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 1D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and there would be an extra dimension at the beginning of the weight's
        shape. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True
    :param conv_mode: supports `cross_correlation`. Default: `cross_correlation`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for the accumulator and intermediate result, but
        only effective when input and output are of float16 dtype.

    .. note::

        * ``weight`` usually has shape ``(out_channels, in_channels, kernel_size)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, kernel_size)``.
        * ``bias`` usually has shape ``(1, out_channels, 1)``.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.Conv1d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 24).astype("float32").reshape(2, 3, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 2)

    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        **kwargs
    ):
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh = self.kernel_size
        ic = self.in_channels
        return kh * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh = self.kernel_size
        if group == 1:
            # Assume format is NCH(W=1)
            return (ochl, ichl, kh)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCH(W=1)
        return (group, ochl // group, ichl // group, kh)

    def _infer_bias_shape(self):
        # Assume format is NCH(W=1)
        return (1, self.out_channels, 1)

    def calc_conv(self, inp, weight, bias):
        return conv1d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class Conv2d(_ConvNd):
    r"""
    Applies a 2D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, H, W)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})` through the
    process described below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 2D cross-correlation operator,
    :math:`N` is batch size, :math:`C` denotes the number of channels,
    :math:`H` is the height of the input planes in pixels, and :math:`W` is
    the width in pixels.

    In general, the output feature map shape can be inferred as follows:

    input: :math:`(N, C_{\text{in}}, H_{\text{in}}, W_{\text{in}})`

    output: :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`, where

    .. math::

        \text{H}_{out} = \lfloor \frac{\text{H}_{in} + 2 * \text{padding[0]} -
        \text{dilation[0]} * (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1 \rfloor

    .. math::

        \text{W}_{out} = \lfloor \frac{\text{W}_{in} + 2 * \text{padding[1]} -
        \text{dilation[1]} * (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1 \rfloor

    When ``groups == in_channels`` and ``out_channels == K * in_channels``,
    where `K` is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be ``(kernel_size, kernel_size)``.
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and there would be an extra dimension at the beginning of the weight's
        shape. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True
    :param conv_mode: supports `cross_correlation`. Default: `cross_correlation`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for the accumulator and intermediate result, but
        only effective when input and output are of float16 dtype.

    .. note::

        * ``weight`` usually has shape ``(out_channels, in_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, height, width)``.
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 2, 2)

    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        **kwargs
    ):
        kernel_size = _pair_nonzero(kernel_size)
        stride = _pair_nonzero(stride)
        padding = _pair(padding)
        dilation = _pair_nonzero(dilation)
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh, kw = self.kernel_size
        ic = self.in_channels
        return kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCHW
            return (ochl, ichl, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCHW
        return (group, ochl // group, ichl // group, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCHW
        return (1, self.out_channels, 1, 1)

    def calc_conv(self, inp, weight, bias):
        return conv2d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class Conv3d(_ConvNd):
    r"""
    Applies a 3D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, T, H, W)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, T_{\text{out}}, H_{\text{out}}, W_{\text{out}})` through the
    process described below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 3D cross-correlation operator,
    :math:`N` is batch size, and :math:`C` denotes the number of channels.

    When ``groups == in_channels`` and ``out_channels == K * in_channels``,
    where `K` is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, T_{in}, H_{in}, W_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be
        ``(kernel_size, kernel_size, kernel_size)``.
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and there would be an extra dimension at the beginning of the weight's
        shape. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True
    :param conv_mode: supports `cross_correlation`. Default: `cross_correlation`

    .. note::

        * ``weight`` usually has shape ``(out_channels, in_channels, depth, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, depth, height, width)``.
        * ``bias`` usually has shape ``(1, out_channels, 1, 1, 1)``.

    Examples:

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.Conv3d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 384).astype("float32").reshape(2, 3, 4, 4, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 2, 2, 2)

    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]],
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Union[int, Tuple[int, int, int]] = 0,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
    ):
        kernel_size = _triple_nonzero(kernel_size)
        stride = _triple_nonzero(stride)
        padding = _triple(padding)
        dilation = _triple_nonzero(dilation)
        self.conv_mode = conv_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
        )

    def _get_fanin(self):
        kt, kh, kw = self.kernel_size
        ic = self.in_channels
        return kt * kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kt, kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCTHW
            return (ochl, ichl, kt, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCTHW
        return (group, ochl // group, ichl // group, kt, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCTHW
        return (1, self.out_channels, 1, 1, 1)

    def calc_conv(self, inp, weight, bias):
        return conv3d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class ConvTranspose2d(_ConvNd):
    r"""
    Applies a 2D transposed convolution over an input tensor.

    This module is also known as a deconvolution or a fractionally-strided convolution.
    :class:`ConvTranspose2d` can be seen as the gradient of the :class:`Conv2d` operation
    with respect to its input.

    Convolution usually reduces the size of the input, while transposed convolution works
    the opposite way, transforming a smaller input into a larger output while preserving the
    connectivity pattern.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be ``(kernel_size, kernel_size)``.
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and there would be an extra dimension at the beginning of the weight's
        shape. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True
    :param conv_mode: supports `cross_correlation`. Default: `cross_correlation`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for the accumulator and intermediate result, but
        only effective when input and output are of float16 dtype.

    .. note::

        * ``weight`` usually has shape ``(in_channels, out_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, in_channels // groups, out_channels // groups, height, width)``.
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``.
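
    Example (a minimal usage sketch; the printed shape follows the standard
    transposed-convolution output size
    ``(H_in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1``
    under the default stride, padding and dilation):

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.ConvTranspose2d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 6, 6)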
  492. """
  493. def __init__(
  494. self,
  495. in_channels: int,
  496. out_channels: int,
  497. kernel_size: Union[int, Tuple[int, int]],
  498. stride: Union[int, Tuple[int, int]] = 1,
  499. padding: Union[int, Tuple[int, int]] = 0,
  500. dilation: Union[int, Tuple[int, int]] = 1,
  501. groups: int = 1,
  502. bias: bool = True,
  503. conv_mode: str = "cross_correlation",
  504. compute_mode: str = "default",
  505. **kwargs
  506. ):
  507. kernel_size = _pair_nonzero(kernel_size)
  508. stride = _pair_nonzero(stride)
  509. padding = _pair(padding)
  510. dilation = _pair_nonzero(dilation)
  511. self.conv_mode = conv_mode
  512. self.compute_mode = compute_mode
  513. super().__init__(
  514. in_channels,
  515. out_channels,
  516. kernel_size,
  517. stride,
  518. padding,
  519. dilation,
  520. groups,
  521. bias,
  522. **kwargs,
  523. )
  524. def _get_fanin(self):
  525. kh, kw = self.kernel_size
  526. oc = self.out_channels
  527. return kh * kw * oc
  528. def _infer_weight_shape(self):
  529. group = self.groups
  530. ichl = self.in_channels
  531. ochl = self.out_channels
  532. kh, kw = self.kernel_size
  533. if group == 1:
  534. # Assume format is NCHW
  535. return (ichl, ochl, kh, kw)
  536. assert (
  537. ichl % group == 0 and ochl % group == 0
  538. ), "invalid config: in_channels={} out_channels={} group={}".format(
  539. ichl, ochl, group
  540. )
  541. # Assume format is NCHW
  542. return (group, ichl // group, ochl // group, kh, kw)
  543. def _infer_bias_shape(self):
  544. # Assume format is NCHW
  545. return (1, self.out_channels, 1, 1)
  546. def calc_conv_transpose2d(self, inp, weight, bias):
  547. return conv_transpose2d(
  548. inp,
  549. weight,
  550. bias,
  551. self.stride,
  552. self.padding,
  553. self.dilation,
  554. self.groups,
  555. self.conv_mode,
  556. self.compute_mode,
  557. )
  558. def forward(self, inp):
  559. return self.calc_conv_transpose2d(inp, self.weight, self.bias)


class LocalConv2d(Conv2d):
    r"""
    Applies a spatial convolution with untied kernels over a grouped, channeled input 4D tensor.
    It is also known as the locally connected layer.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param input_height: the height of the input images.
    :param input_width: the width of the input images.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be ``(kernel_size, kernel_size)``.
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``. Default: 1

    .. note::

        * ``weight`` usually has shape ``(out_height, out_width, in_channels, height, width, out_channels)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_height, out_width, in_channels // groups, height, width, out_channels // groups)``.
        * the module is constructed with ``bias=False``, so it holds no bias parameter.
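
    Example (a minimal usage sketch; it assumes the output spatial size matches that
    of an ordinary convolution with the same kernel size, stride and padding):

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.LocalConv2d(
            in_channels=3, out_channels=1, input_height=4, input_width=4, kernel_size=3
        )
        inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 2, 2)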
  581. """
  582. def __init__(
  583. self,
  584. in_channels: int,
  585. out_channels: int,
  586. input_height: int,
  587. input_width: int,
  588. kernel_size: Union[int, Tuple[int, int]],
  589. stride: Union[int, Tuple[int, int]] = 1,
  590. padding: Union[int, Tuple[int, int]] = 0,
  591. dilation: Union[int, Tuple[int, int]] = 1,
  592. groups: int = 1,
  593. conv_mode: str = "cross_correlation",
  594. **kwargs
  595. ):
  596. self.input_height = input_height
  597. self.input_width = input_width
  598. super().__init__(
  599. in_channels,
  600. out_channels,
  601. kernel_size,
  602. stride,
  603. padding,
  604. dilation,
  605. groups,
  606. bias=False,
  607. **kwargs,
  608. )
  609. def _infer_weight_shape(self):
  610. group = self.groups
  611. out_height = (
  612. self.input_height + self.padding[0] * 2 - self.kernel_size[0]
  613. ) // self.stride[0] + 1
  614. out_width = (
  615. self.input_width + self.padding[1] * 2 - self.kernel_size[1]
  616. ) // self.stride[1] + 1
  617. # Assume format is NCHW
  618. return (
  619. group,
  620. out_height,
  621. out_width,
  622. self.in_channels // group,
  623. self.kernel_size[0],
  624. self.kernel_size[1],
  625. self.out_channels // group,
  626. )
  627. def forward(self, inp):
  628. return local_conv2d(
  629. inp,
  630. self.weight,
  631. None,
  632. self.stride,
  633. self.padding,
  634. self.dilation,
  635. self.conv_mode,
  636. )


class ConvRelu2d(Conv2d):
    r"""
    A fused :class:`~.Module` including :class:`~.module.Conv2d` and :func:`~.relu`.
    Could be replaced with the :class:`~.QATModule` version :class:`~.qat.ConvRelu2d`
    using :func:`~.quantize.quantize_qat`.
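
    Example (a minimal usage sketch; the only claim checked is that the fused ReLU
    clamps every output element to be non-negative):

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.ConvRelu2d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
        oup = m(inp)
        print((oup.numpy() >= 0).all())

    Outputs:

    .. testoutput::

        True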
  641. """
  642. def forward(self, inp):
  643. return relu(self.calc_conv(inp, self.weight, self.bias))


class DeformableConv2d(_ConvNd):
    """
    Deformable convolution.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be ``(kernel_size, kernel_size)``.
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and there would be an extra dimension at the beginning of the weight's
        shape. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True
    :param conv_mode: supports `cross_correlation`. Default: `cross_correlation`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for the accumulator and intermediate result, but
        only effective when input and output are of float16 dtype.

    .. note::

        * ``weight`` usually has shape ``(out_channels, in_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, height, width)``.
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``.
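
    Example (a sketch only, not a doctest; it assumes the common deformable-convolution
    layout in which ``offset`` carries ``2 * kh * kw`` channels and ``mask`` carries
    ``kh * kw`` channels, both with the spatial size of the output):

    .. code-block:: python

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.DeformableConv2d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
        # for a 3x3 kernel, stride 1 and no padding, the output spatial size is 2x2
        offset = mge.tensor(np.zeros((2, 2 * 3 * 3, 2, 2), dtype="float32"))
        mask = mge.tensor(np.ones((2, 3 * 3, 2, 2), dtype="float32"))
        oup = m(inp, offset, mask)
        print(oup.numpy().shape)  # expected under these assumptions: (2, 1, 2, 2)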
  673. """
  674. def __init__(
  675. self,
  676. in_channels: int,
  677. out_channels: int,
  678. kernel_size: Union[int, Tuple[int, int]],
  679. stride: Union[int, Tuple[int, int]] = 1,
  680. padding: Union[int, Tuple[int, int]] = 0,
  681. dilation: Union[int, Tuple[int, int]] = 1,
  682. groups: int = 1,
  683. bias: bool = True,
  684. conv_mode: str = "cross_correlation",
  685. compute_mode: str = "default",
  686. **kwargs
  687. ):
  688. kernel_size = _pair_nonzero(kernel_size)
  689. stride = _pair_nonzero(stride)
  690. padding = _pair(padding)
  691. dilation = _pair_nonzero(dilation)
  692. self.conv_mode = conv_mode
  693. self.compute_mode = compute_mode
  694. super().__init__(
  695. in_channels,
  696. out_channels,
  697. kernel_size,
  698. stride,
  699. padding,
  700. dilation,
  701. groups,
  702. bias,
  703. **kwargs,
  704. )
  705. def _get_fanin(self):
  706. kh, kw = self.kernel_size
  707. ic = self.in_channels
  708. return kh * kw * ic
  709. def _infer_weight_shape(self):
  710. group = self.groups
  711. ichl = self.in_channels
  712. ochl = self.out_channels
  713. kh, kw = self.kernel_size
  714. if group == 1:
  715. # Assume format is NCHW
  716. return (ochl, ichl, kh, kw)
  717. assert (
  718. ichl % group == 0 and ochl % group == 0
  719. ), "invalid config: in_channels={} out_channels={} group={}".format(
  720. ichl, ochl, group
  721. )
  722. # Assume format is NCHW
  723. return (group, ochl // group, ichl // group, kh, kw)
  724. def _infer_bias_shape(self):
  725. # Assume format is NCHW
  726. return (1, self.out_channels, 1, 1)
  727. def calc_conv(self, inp, weight, offset, mask, bias):
  728. return deformable_conv2d(
  729. inp,
  730. weight,
  731. offset,
  732. mask,
  733. bias,
  734. self.stride,
  735. self.padding,
  736. self.dilation,
  737. self.groups,
  738. self.conv_mode,
  739. self.compute_mode,
  740. )
  741. def forward(self, inp, offset, mask):
  742. return self.calc_conv(inp, self.weight, offset, mask, self.bias)


class ConvTranspose3d(_ConvNd):
    r"""
    Applies a 3D transposed convolution over an input tensor.

    Only supports the case where ``groups == 1`` and ``conv_mode == "cross_correlation"``.
    :class:`ConvTranspose3d` can be seen as the gradient of the :class:`Conv3d` operation
    with respect to its input.

    3D convolution usually reduces the size of the input, while 3D transposed convolution
    works the opposite way, transforming a smaller input into a larger output while
    preserving the connectivity pattern.

    :param in_channels: number of input channels.
    :param out_channels: number of output channels.
    :param kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
        an :class:`int`, the actual kernel size would be
        ``(kernel_size, kernel_size, kernel_size)``.
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on all sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :param bias: whether to add a bias onto the result of convolution. Default: True

    .. note::

        * ``weight`` usually has shape ``(in_channels, out_channels, depth, height, width)``.
        * ``bias`` usually has shape ``(1, out_channels, 1, 1, 1)``.
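
    Example (a minimal usage sketch; the printed shape follows the standard
    transposed-convolution output size per spatial dimension, assuming the default
    stride, padding and dilation):

    .. testcode::

        import numpy as np
        import megengine as mge
        import megengine.module as M

        m = M.ConvTranspose3d(in_channels=3, out_channels=1, kernel_size=3)
        inp = mge.tensor(np.arange(0, 384).astype("float32").reshape(2, 3, 4, 4, 4))
        oup = m(inp)
        print(oup.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 1, 6, 6, 6)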
  766. """
  767. def __init__(
  768. self,
  769. in_channels: int,
  770. out_channels: int,
  771. kernel_size: Union[int, Tuple[int, int, int]],
  772. stride: Union[int, Tuple[int, int, int]] = 1,
  773. padding: Union[int, Tuple[int, int, int]] = 0,
  774. dilation: Union[int, Tuple[int, int, int]] = 1,
  775. bias: bool = True,
  776. ):
  777. kernel_size = _triple_nonzero(kernel_size)
  778. stride = _triple_nonzero(stride)
  779. padding = _triple(padding)
  780. dilation = _triple_nonzero(dilation)
  781. super().__init__(
  782. in_channels=in_channels,
  783. out_channels=out_channels,
  784. kernel_size=kernel_size,
  785. stride=stride,
  786. padding=padding,
  787. dilation=dilation,
  788. groups=1,
  789. bias=bias,
  790. )
  791. def _get_fanin(self):
  792. kt, kh, kw = self.kernel_size
  793. ic = self.in_channels
  794. return kt * kh * kw * ic
  795. def _infer_weight_shape(self):
  796. ichl = self.in_channels
  797. ochl = self.out_channels
  798. kt, kh, kw = self.kernel_size
  799. return (ichl, ochl, kt, kh, kw)
  800. def _infer_bias_shape(self):
  801. # Assume format is NCTHW
  802. return (1, self.out_channels, 1, 1, 1)
  803. def forward(self, inp):
  804. return conv_transpose3d(
  805. inp, self.weight, self.bias, self.stride, self.padding, self.dilation,
  806. )

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. If you want to run GPU programs, make sure the machine has a GPU device and that its driver is properly installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
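A quick way to confirm that MegEngine can see a usable GPU (a minimal sketch; it only checks CUDA visibility, not driver health):

import megengine as mge

# True when MegEngine was able to initialize CUDA on this machine
print(mge.is_cuda_available())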