conv.py

# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from abc import abstractmethod
from typing import Tuple, Union

import numpy as np

from ..functional import (
    conv1d,
    conv2d,
    conv3d,
    conv_transpose2d,
    conv_transpose3d,
    deformable_conv2d,
    local_conv2d,
    pad,
    relu,
)
from ..tensor import Parameter
from ..utils.tuple_function import _pair, _pair_nonzero, _triple, _triple_nonzero
from . import init
from .module import Module


class _ConvNd(Module):
    """base class for convolution modules, including transposed conv"""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int]],
        dilation: Union[int, Tuple[int, int]],
        groups: int,
        bias: bool = True,
        **kwargs
    ):
        super().__init__(**kwargs)
        if in_channels % groups != 0:
            raise ValueError("in_channels must be divisible by groups")
        if out_channels % groups != 0:
            raise ValueError("out_channels must be divisible by groups")
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups

        self.weight = Parameter(np.zeros(self._infer_weight_shape(), dtype=np.float32))
        self.bias = None
        if bias:
            self.bias = Parameter(np.zeros(self._infer_bias_shape(), dtype=np.float32))
        self.reset_parameters()

    @abstractmethod
    def _get_fanin(self):
        pass

    def reset_parameters(self) -> None:
        fanin = self._get_fanin()
        std = np.sqrt(1 / fanin)
        init.normal_(self.weight, 0.0, std)
        if self.bias is not None:
            init.zeros_(self.bias)

    @abstractmethod
    def _infer_weight_shape(self):
        pass

    @abstractmethod
    def _infer_bias_shape(self):
        pass

    def _module_info_string(self):
        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}"
        if self.stride != (1,) * len(self.stride):
            s += ", stride={stride}"
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias is None:
            s += ", bias=False"
        return s.format(**self.__dict__)


class Conv1d(_ConvNd):
    r"""Applies a 1D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, H)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, H_{\text{out}})` through the
    process described as below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 1D cross-correlation operator,
    :math:`N` is batch size, :math:`C` denotes the number of channels, and
    :math:`H` is the length of the 1D data.

    When `groups == in_channels` and `out_channels == K * in_channels`,
    where K is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, H_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions.
        stride: stride of the 1D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Default: 0
        dilation: dilation of the 1D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a "grouped convolution". When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, out_channels // groups,
            in_channels // groups, kernel_size)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True
        conv_mode: Supports `cross_correlation`. Default: `cross_correlation`
        compute_mode: When set to "default", no special requirements will be
            placed on the precision of intermediate results. When set to "float32",
            "float32" would be used for the accumulator and intermediate result, but
            only effective when input and output are of float16 dtype.
        padding_mode: "zeros", "reflect" or "replicate". Default: "zeros".
            Refer to :class:`~.module.padding.Pad` for more information.

    Note:
        * ``weight`` usually has shape ``(out_channels, in_channels, kernel_size)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, kernel_size)``
        * ``bias`` usually has shape ``(1, out_channels, 1)``

    Examples:

        .. testcode::

            import numpy as np
            import megengine as mge
            import megengine.module as M

            m = M.Conv1d(in_channels=3, out_channels=1, kernel_size=3)
            inp = mge.tensor(np.arange(0, 24).astype("float32").reshape(2, 3, 4))
            oup = m(inp)
            print(oup.numpy().shape)

        Outputs:

        .. testoutput::

            (2, 1, 2)
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        padding: int = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        padding_mode: str = "zeros",
        **kwargs
    ):
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        self.padding_mode = padding_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh = self.kernel_size
        ic = self.in_channels
        return kh * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh = self.kernel_size
        if group == 1:
            # Assume format is NCH(W=1)
            return (ochl, ichl, kh)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCH(W=1)
        return (group, ochl // group, ichl // group, kh)

    def _infer_bias_shape(self):
        # Assume format is NCH(W=1)
        return (1, self.out_channels, 1)

    def get_pad_witdth(self):
        return ((0, 0), (0, 0), (self.padding, self.padding))

    def calc_conv(self, inp, weight, bias):
        assert self.padding_mode in [
            "zeros",
            "reflect",
            "replicate",
        ]
        if self.padding_mode != "zeros":
            return conv1d(
                pad(inp, self.get_pad_witdth(), self.padding_mode),
                weight,
                bias,
                self.stride,
                0,
                self.dilation,
                self.groups,
                self.conv_mode,
                self.compute_mode,
            )
        return conv1d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class Conv2d(_ConvNd):
    r"""Applies a 2D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, H, W)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})` through the
    process described as below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 2D cross-correlation operator,
    :math:`N` is batch size, :math:`C` denotes number of channels,
    :math:`H` is height of input planes in pixels, and :math:`W` is
    width in pixels.

    In general, output feature maps' shapes can be inferred as follows:

    input: :math:`(N, C_{\text{in}}, H_{\text{in}}, W_{\text{in}})`

    output: :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})` where

    .. math::

        \text{H}_{out} = \lfloor \frac{\text{H}_{in} + 2 * \text{padding[0]} -
        \text{dilation[0]} * (\text{kernel_size[0]} - 1) - 1}{\text{stride[0]}} + 1 \rfloor

    .. math::

        \text{W}_{out} = \lfloor \frac{\text{W}_{in} + 2 * \text{padding[1]} -
        \text{dilation[1]} * (\text{kernel_size[1]} - 1) - 1}{\text{stride[1]}} + 1 \rfloor

    When `groups == in_channels` and `out_channels == K * in_channels`,
    where K is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, H_{in}, W_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions. If kernel_size is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size)``.
        stride: stride of the 2D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Default: 0
        dilation: dilation of the 2D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a ``grouped convolution``. When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, out_channels // groups,
            in_channels // groups, height, width)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True
        conv_mode: Supports `cross_correlation`. Default: `cross_correlation`
        compute_mode: When set to "default", no special requirements will be
            placed on the precision of intermediate results. When set to "float32",
            "float32" would be used for the accumulator and intermediate result, but
            only effective when input and output are of float16 dtype.
        padding_mode: "zeros", "reflect" or "replicate". Default: "zeros".
            Refer to :class:`~.module.padding.Pad` for more information.

    Note:
        * ``weight`` usually has shape ``(out_channels, in_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, height, width)``
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``

    Examples:

        .. testcode::

            import numpy as np
            import megengine as mge
            import megengine.module as M

            m = M.Conv2d(in_channels=3, out_channels=1, kernel_size=3)
            inp = mge.tensor(np.arange(0, 96).astype("float32").reshape(2, 3, 4, 4))
            oup = m(inp)
            print(oup.numpy().shape)

        Outputs:

        .. testoutput::

            (2, 1, 2, 2)
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        padding_mode: str = "zeros",
        **kwargs
    ):
        kernel_size = _pair_nonzero(kernel_size)
        stride = _pair_nonzero(stride)
        padding = _pair(padding)
        dilation = _pair_nonzero(dilation)
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        self.padding_mode = padding_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh, kw = self.kernel_size
        ic = self.in_channels
        return kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCHW
            return (ochl, ichl, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCHW
        return (group, ochl // group, ichl // group, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCHW
        return (1, self.out_channels, 1, 1)

    def get_pad_witdth(self):
        return (
            (0, 0),
            (0, 0),
            (self.padding[0], self.padding[0]),
            (self.padding[1], self.padding[1]),
        )

    def calc_conv(self, inp, weight, bias):
        assert self.padding_mode in [
            "zeros",
            "reflect",
            "replicate",
        ]
        if self.padding_mode != "zeros":
            return conv2d(
                pad(inp, self.get_pad_witdth(), self.padding_mode),
                weight,
                bias,
                self.stride,
                0,
                self.dilation,
                self.groups,
                self.conv_mode,
                self.compute_mode,
            )
        return conv2d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class Conv3d(_ConvNd):
    r"""Applies a 3D convolution over an input tensor.

    For instance, given an input of the size :math:`(N, C_{\text{in}}, T, H, W)`,
    this layer generates an output of the size
    :math:`(N, C_{\text{out}}, T_{\text{out}}, H_{\text{out}}, W_{\text{out}})` through the
    process described as below:

    .. math::

        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)

    where :math:`\star` is the valid 3D cross-correlation operator,
    :math:`N` is batch size, and :math:`C` denotes number of channels.

    When `groups == in_channels` and `out_channels == K * in_channels`,
    where K is a positive integer, this operation is also known as depthwise
    convolution.

    In other words, for an input of size :math:`(N, C_{in}, T_{in}, H_{in}, W_{in})`,
    a depthwise convolution with a depthwise multiplier `K` can be constructed
    by the arguments :math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions. If kernel_size is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size, kernel_size)``.
        stride: stride of the 3D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Only zero-padding is supported. Default: 0
        dilation: dilation of the 3D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a ``grouped convolution``. When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, out_channels // groups,
            in_channels // groups, depth, height, width)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True
        conv_mode: Supports `cross_correlation`. Default: `cross_correlation`

    Note:
        * ``weight`` usually has shape ``(out_channels, in_channels, depth, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, depth, height, width)``
        * ``bias`` usually has shape ``(1, out_channels, 1, 1, 1)``

    Examples:

        .. testcode::

            import numpy as np
            import megengine as mge
            import megengine.module as M

            m = M.Conv3d(in_channels=3, out_channels=1, kernel_size=3)
            inp = mge.tensor(np.arange(0, 384).astype("float32").reshape(2, 3, 4, 4, 4))
            oup = m(inp)
            print(oup.numpy().shape)

        Outputs:

        .. testoutput::

            (2, 1, 2, 2, 2)
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]],
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Union[int, Tuple[int, int, int]] = 0,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
    ):
        kernel_size = _triple_nonzero(kernel_size)
        stride = _triple_nonzero(stride)
        padding = _triple(padding)
        dilation = _triple_nonzero(dilation)
        self.conv_mode = conv_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
        )

    def _get_fanin(self):
        kt, kh, kw = self.kernel_size
        ic = self.in_channels
        return kt * kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kt, kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCTHW
            return (ochl, ichl, kt, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCTHW
        return (group, ochl // group, ichl // group, kt, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCTHW
        return (1, self.out_channels, 1, 1, 1)

    def calc_conv(self, inp, weight, bias):
        return conv3d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
        )

    def forward(self, inp):
        return self.calc_conv(inp, self.weight, self.bias)


class ConvTranspose2d(_ConvNd):
    r"""Applies a 2D transposed convolution over an input tensor.

    This module is also known as a deconvolution or a fractionally-strided convolution.
    :class:`ConvTranspose2d` can be seen as the gradient of the :class:`Conv2d` operation
    with respect to its input.

    Convolution usually reduces the size of the input, while transposed convolution works
    the opposite way, transforming a smaller input into a larger output while preserving the
    connectivity pattern.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size)``.
        stride: stride of the 2D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Only zero-padding is supported. Default: 0
        dilation: dilation of the 2D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a ``grouped convolution``. When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, in_channels // groups,
            out_channels // groups, height, width)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True
        conv_mode: Supports `cross_correlation`. Default: `cross_correlation`
        compute_mode: When set to "default", no special requirements will be
            placed on the precision of intermediate results. When set to "float32",
            "float32" would be used for the accumulator and intermediate result, but
            only effective when input and output are of float16 dtype.

    Note:
        * ``weight`` usually has shape ``(in_channels, out_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, in_channels // groups, out_channels // groups, height, width)``
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        **kwargs
    ):
        kernel_size = _pair_nonzero(kernel_size)
        stride = _pair_nonzero(stride)
        padding = _pair(padding)
        dilation = _pair_nonzero(dilation)
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh, kw = self.kernel_size
        oc = self.out_channels
        return kh * kw * oc

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCHW
            return (ichl, ochl, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCHW
        return (group, ichl // group, ochl // group, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCHW
        return (1, self.out_channels, 1, 1)

    def calc_conv_transpose2d(self, inp, weight, bias):
        return conv_transpose2d(
            inp,
            weight,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp):
        return self.calc_conv_transpose2d(inp, self.weight, self.bias)
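

# Illustrative usage sketch (added here for reference, not part of the original
# module). With the defaults stride=1, padding=0, dilation=1, the output spatial
# size follows the standard transposed-convolution formula
#   out = (in - 1) * stride - 2 * padding + dilation * (kernel_size - 1) + 1.
#
#     import numpy as np
#     import megengine as mge
#     import megengine.module as M
#
#     m = M.ConvTranspose2d(in_channels=3, out_channels=1, kernel_size=3)
#     inp = mge.tensor(np.ones((2, 3, 4, 4), dtype="float32"))
#     oup = m(inp)
#     print(oup.numpy().shape)  # expected: (2, 1, 6, 6)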


class LocalConv2d(Conv2d):
    r"""Applies a spatial convolution with untied kernels over a grouped, channeled 4D input tensor.

    It is also known as the locally connected layer.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        input_height: the height of the input images.
        input_width: the width of the input images.
        kernel_size: size of weight on spatial dimensions. If kernel_size is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size)``.
        stride: stride of the 2D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Only zero-padding is supported. Default: 0
        dilation: dilation of the 2D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a "grouped convolution". When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``. Default: 1

    Note:
        * ``weight`` usually has shape ``(out_height, out_width, in_channels, height, width, out_channels)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_height, out_width, in_channels // groups, height, width, out_channels // groups)``
        * this module has no bias: it always constructs the underlying convolution with ``bias=False``
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        input_height: int,
        input_width: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        conv_mode: str = "cross_correlation",
        **kwargs
    ):
        self.input_height = input_height
        self.input_width = input_width
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias=False,
            **kwargs,
        )

    def _infer_weight_shape(self):
        group = self.groups
        out_height = (
            self.input_height + self.padding[0] * 2 - self.kernel_size[0]
        ) // self.stride[0] + 1
        out_width = (
            self.input_width + self.padding[1] * 2 - self.kernel_size[1]
        ) // self.stride[1] + 1
        # Assume format is NCHW
        return (
            group,
            out_height,
            out_width,
            self.in_channels // group,
            self.kernel_size[0],
            self.kernel_size[1],
            self.out_channels // group,
        )

    def forward(self, inp):
        return local_conv2d(
            inp,
            self.weight,
            None,
            self.stride,
            self.padding,
            self.dilation,
            self.conv_mode,
        )
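

# Illustrative usage sketch (added for reference, not part of the original
# module). LocalConv2d uses untied weights per output position and has no bias;
# its output spatial size follows the same arithmetic as Conv2d.
#
#     import numpy as np
#     import megengine as mge
#     import megengine.module as M
#
#     m = M.LocalConv2d(
#         in_channels=3, out_channels=1, input_height=4, input_width=4, kernel_size=3
#     )
#     inp = mge.tensor(np.ones((2, 3, 4, 4), dtype="float32"))
#     print(m(inp).numpy().shape)  # expected: (2, 1, 2, 2)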


class ConvRelu2d(Conv2d):
    r"""A fused :class:`~.Module` including :class:`~.module.Conv2d` and :func:`~.relu`.

    Could be replaced with the :class:`~.QATModule` version :class:`~.qat.ConvRelu2d` using :func:`~.quantize.quantize_qat`.
    """

    def forward(self, inp):
        return relu(self.calc_conv(inp, self.weight, self.bias))
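

# Illustrative usage sketch (added for reference, not part of the original
# module). ConvRelu2d behaves like Conv2d followed by relu, so the output shape
# matches the plain Conv2d example and every output value is non-negative.
#
#     import numpy as np
#     import megengine as mge
#     import megengine.module as M
#
#     m = M.ConvRelu2d(in_channels=3, out_channels=1, kernel_size=3)
#     inp = mge.tensor(np.ones((2, 3, 4, 4), dtype="float32"))
#     oup = m(inp)
#     print(oup.numpy().shape, (oup.numpy() >= 0).all())  # expected: (2, 1, 2, 2) True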


class DeformableConv2d(_ConvNd):
    r"""Deformable Convolution.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions. If kernel_size is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size)``.
        stride: stride of the 2D convolution operation. Default: 1
        padding: size of the paddings added to the input on both sides of its
            spatial dimensions. Only zero-padding is supported. Default: 0
        dilation: dilation of the 2D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a ``grouped convolution``. When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, out_channels // groups,
            in_channels // groups, height, width)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True
        conv_mode: Supports `cross_correlation`. Default: `cross_correlation`
        compute_mode: When set to "default", no special requirements will be
            placed on the precision of intermediate results. When set to "float32",
            "float32" would be used for the accumulator and intermediate result, but
            only effective when input and output are of float16 dtype.

    Note:
        * ``weight`` usually has shape ``(out_channels, in_channels, height, width)``;
          if ``groups`` is not 1, the shape will be
          ``(groups, out_channels // groups, in_channels // groups, height, width)``
        * ``bias`` usually has shape ``(1, out_channels, 1, 1)``
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        stride: Union[int, Tuple[int, int]] = 1,
        padding: Union[int, Tuple[int, int]] = 0,
        dilation: Union[int, Tuple[int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
        conv_mode: str = "cross_correlation",
        compute_mode: str = "default",
        **kwargs
    ):
        kernel_size = _pair_nonzero(kernel_size)
        stride = _pair_nonzero(stride)
        padding = _pair(padding)
        dilation = _pair_nonzero(dilation)
        self.conv_mode = conv_mode
        self.compute_mode = compute_mode
        super().__init__(
            in_channels,
            out_channels,
            kernel_size,
            stride,
            padding,
            dilation,
            groups,
            bias,
            **kwargs,
        )

    def _get_fanin(self):
        kh, kw = self.kernel_size
        ic = self.in_channels
        return kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCHW
            return (ochl, ichl, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCHW
        return (group, ochl // group, ichl // group, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCHW
        return (1, self.out_channels, 1, 1)

    def calc_conv(self, inp, weight, offset, mask, bias):
        return deformable_conv2d(
            inp,
            weight,
            offset,
            mask,
            bias,
            self.stride,
            self.padding,
            self.dilation,
            self.groups,
            self.conv_mode,
            self.compute_mode,
        )

    def forward(self, inp, offset, mask):
        return self.calc_conv(inp, self.weight, offset, mask, self.bias)
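

# Illustrative usage sketch (added for reference, not part of the original
# module). forward takes (inp, offset, mask); the offset/mask channel counts
# below follow the common DCNv2 convention (2 * kh * kw and kh * kw channels per
# deformable group) and should be checked against the functional
# deformable_conv2d documentation for the exact layout expected.
#
#     import numpy as np
#     import megengine as mge
#     import megengine.module as M
#
#     m = M.DeformableConv2d(in_channels=3, out_channels=1, kernel_size=3)
#     inp = mge.tensor(np.ones((2, 3, 4, 4), dtype="float32"))
#     # output spatial size is 2x2 for a 4x4 input with a 3x3 kernel
#     offset = mge.tensor(np.zeros((2, 2 * 3 * 3, 2, 2), dtype="float32"))
#     mask = mge.tensor(np.ones((2, 3 * 3, 2, 2), dtype="float32"))
#     print(m(inp, offset, mask).numpy().shape)  # expected: (2, 1, 2, 2)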


class ConvTranspose3d(_ConvNd):
    r"""Applies a 3D transposed convolution over an input tensor.

    Only the case where ``groups == 1`` and ``conv_mode == "cross_correlation"`` is supported.

    :class:`ConvTranspose3d` can be seen as the gradient of the :class:`Conv3d` operation
    with respect to its input.

    3D convolution usually reduces the size of the input, while 3D transposed convolution
    works the opposite way, transforming a smaller input into a larger output while
    preserving the connectivity pattern.

    Args:
        in_channels: number of input channels.
        out_channels: number of output channels.
        kernel_size: size of weight on spatial dimensions. If ``kernel_size`` is
            an :class:`int`, the actual kernel size would be
            ``(kernel_size, kernel_size, kernel_size)``.
        stride: stride of the 3D convolution operation. Default: 1
        padding: size of the paddings added to the input on all sides of its
            spatial dimensions. Only zero-padding is supported. Default: 0
        dilation: dilation of the 3D convolution operation. Default: 1
        groups: number of groups into which the input and output channels are divided,
            so as to perform a ``grouped convolution``. When ``groups`` is not 1,
            ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
            and the shape of weight should be ``(groups, in_channels // groups,
            out_channels // groups, depth, height, width)``. Default: 1
        bias: whether to add a bias onto the result of convolution. Default: True

    Note:
        * ``weight`` usually has shape ``(in_channels, out_channels, depth, height, width)``
        * ``bias`` usually has shape ``(1, out_channels, 1, 1, 1)``
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int, int]],
        stride: Union[int, Tuple[int, int, int]] = 1,
        padding: Union[int, Tuple[int, int, int]] = 0,
        dilation: Union[int, Tuple[int, int, int]] = 1,
        groups: int = 1,
        bias: bool = True,
    ):
        kernel_size = _triple_nonzero(kernel_size)
        stride = _triple_nonzero(stride)
        padding = _triple(padding)
        dilation = _triple_nonzero(dilation)
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=groups,
            bias=bias,
        )

    def _get_fanin(self):
        kt, kh, kw = self.kernel_size
        ic = self.in_channels
        return kt * kh * kw * ic

    def _infer_weight_shape(self):
        group = self.groups
        ichl = self.in_channels
        ochl = self.out_channels
        kt, kh, kw = self.kernel_size
        if group == 1:
            # Assume format is NCTHW
            return (ichl, ochl, kt, kh, kw)

        assert (
            ichl % group == 0 and ochl % group == 0
        ), "invalid config: in_channels={} out_channels={} group={}".format(
            ichl, ochl, group
        )
        # Assume format is NCTHW
        return (group, ichl // group, ochl // group, kt, kh, kw)

    def _infer_bias_shape(self):
        # Assume format is NCTHW
        return (1, self.out_channels, 1, 1, 1)

    def forward(self, inp):
        return conv_transpose3d(
            inp, self.weight, self.bias, self.stride, self.padding, self.dilation,
        )