
nn.py 56 kB

  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. # pylint: disable=too-many-lines
  10. from functools import lru_cache
  11. from typing import NamedTuple, Optional, Sequence, Tuple, Union
  12. from ..core._imperative_rt.core2 import apply, dtype_promotion
  13. from ..core._imperative_rt.ops import SubgraphBuilder as _SubgraphBuilder
  14. from ..core.ops import builtin
  15. from ..core.ops.builtin import (
  16. BatchNorm,
  17. Elemwise,
  18. GetVarShape,
  19. Identity,
  20. Reduce,
  21. Reshape,
  22. TypeCvt,
  23. )
  24. from ..core.ops.special import Const
  25. from ..core.tensor import amp, megbrain_graph
  26. from ..core.tensor.array_method import _elwise_apply
  27. from ..core.tensor.utils import (
  28. astensor1d,
  29. astype,
  30. cast_tensors,
  31. convert_single_value,
  32. make_shape_tuple,
  33. setscalar,
  34. subgraph,
  35. )
  36. from ..device import get_default_device
  37. from ..distributed import WORLD, is_distributed
  38. from ..jit import exclude_from_trace
  39. from ..random import uniform
  40. from ..tensor import Tensor
  41. from ..utils.deprecation import deprecated_func
  42. from ..utils.tuple_function import _pair, _pair_nonzero, _triple, _triple_nonzero
  43. from .debug_param import get_execution_strategy
  44. from .distributed import all_reduce_sum
  45. from .elemwise import _elwise, exp, log, log1p, maximum, minimum
  46. from .math import matmul, max, sum
  47. from .tensor import broadcast_to, concat, expand_dims, ones, squeeze, zeros
  48. __all__ = [
  49. "adaptive_avg_pool2d",
  50. "adaptive_max_pool2d",
  51. "avg_pool2d",
  52. "batch_norm",
  53. "conv1d",
  54. "conv2d",
  55. "conv3d",
  56. "conv_transpose2d",
  57. "conv_transpose3d",
  58. "deformable_conv2d",
  59. "deformable_psroi_pooling",
  60. "dropout",
  61. "embedding",
  62. "gelu",
  63. "hsigmoid",
  64. "hswish",
  65. "indexing_one_hot",
  66. "leaky_relu",
  67. "linear",
  68. "local_conv2d",
  69. "logsigmoid",
  70. "logsumexp",
  71. "logsoftmax",
  72. "max_pool2d",
  73. "one_hot",
  74. "prelu",
  75. "relu",
  76. "relu6",
  77. "remap",
  78. "sigmoid",
  79. "sliding_window",
  80. "sliding_window_transpose",
  81. "silu",
  82. "softmax",
  83. "softplus",
  84. "sync_batch_norm",
  85. "warp_affine",
  86. "warp_perspective",
  87. ]
  88. def expand_hw(x):
  89. # NOTE: >1d array is accepted, as long as 1 <= size <= 2
  90. try:
  91. x = int(x)
  92. return [x, x]
  93. except (TypeError, ValueError):
  94. pass
  95. h, w = x
  96. return int(h), int(w)
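# --- Editor's illustrative sketch (not part of the original nn.py): demonstrates how the
# private helper above normalizes scalar vs. pair arguments. Wrapped in a function with a
# hypothetical name so that importing the module stays side-effect free.
def _expand_hw_example():
    assert expand_hw(3) == [3, 3]        # a scalar is broadcast to both h and w
    assert expand_hw((2, 4)) == (2, 4)   # a 2-element sequence is converted to per-axis ints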
  97. def linear(
  98. inp: Tensor, weight: Tensor, bias: Optional[Tensor] = None, compute_mode="default",
  99. ) -> Tensor:
  100. r"""Applies a linear transformation to the input tensor.
  101. Refer to :class:`~.module.linear.Linear` for more information.
  102. Args:
  103. inp: input tensor with shape `(N, in_features)`.
  104. weight: weight with shape `(out_features, in_features)`.
  105. bias: bias with shape `(out_features,)`. Default: None
  106. """
  107. ret = matmul(inp, weight, transpose_b=True, compute_mode=compute_mode)
  108. if bias is not None:
  109. if amp._enabled:
  110. bias = bias.astype("float16")
  111. ret += bias
  112. return ret
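# --- Editor's illustrative sketch (not part of the original nn.py): a minimal call to
# ``linear`` showing the expected shapes. The random inputs and the helper name are
# placeholders chosen for exposition only.
def _linear_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(4, 16).astype(np.float32))  # (N, in_features)
    w = tensor(np.random.randn(8, 16).astype(np.float32))  # (out_features, in_features)
    b = tensor(np.zeros(8, dtype=np.float32))               # (out_features,)
    y = linear(x, w, b)
    # y has shape (N, out_features) == (4, 8)
    return y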
  113. def conv1d(
  114. inp: Tensor,
  115. weight: Tensor,
  116. bias: Optional[Tensor] = None,
  117. stride: int = 1,
  118. padding: int = 0,
  119. dilation: int = 1,
  120. groups: int = 1,
  121. conv_mode="cross_correlation",
  122. compute_mode="default",
  123. ) -> Tensor:
  124. r"""1D convolution operation.
  125. Refer to :class:`~.Conv1d` for more information.
  126. Args:
  127. inp: The feature map of the convolution operation
  128. weight: The convolution kernel.
  129. bias: The bias added to the result of convolution (if given)
  130. stride: Stride of the 1D convolution operation. Default: 1
  131. padding: Size of the paddings added to the input on both sides of its
  132. spatial dimensions. Only zero-padding is supported. Default: 0
  133. dilation: Dilation of the 1D convolution operation. Default: 1
  134. groups: number of groups to divide input and output channels into,
  135. so as to perform a "grouped convolution". When ``groups`` is not 1,
  136. ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
  137. and the shape of weight should be ``(groups, out_channel // groups,
  138. in_channels // groups, kernel_size)``. Default: 1
  139. conv_mode: Supports 'cross_correlation'. Default:
  140. 'cross_correlation'.
  141. compute_mode: When set to 'default', no special requirements will be
  142. placed on the precision of intermediate results. When set to 'float32',
  143. float32 would be used for accumulator and intermediate result, but only
  144. effective when input and output are of float16 dtype.
  145. """
  146. assert (
  147. conv_mode.lower() == "cross_correlation"
  148. or conv_mode.name == "CROSS_CORRELATION"
  149. )
  150. assert compute_mode.lower() == "default" or compute_mode.name == "DEFAULT"
  151. assert inp.ndim == 3, "the input dimension of conv1d should be 3"
  152. assert weight.ndim == 3, "the weight dimension of conv1d should be 3"
  153. if amp._enabled:
  154. compute_mode = "float32"
  155. inp, weight, bias = cast_tensors(inp, weight, bias)
  156. else:
  157. dtype = dtype_promotion(inp, weight)
  158. if inp.dtype != dtype:
  159. inp = inp.astype(dtype)
  160. if weight.dtype != dtype:
  161. weight = weight.astype(dtype)
  162. inp = expand_dims(inp, 3)
  163. weight = expand_dims(weight, 3)
  164. if bias is not None:
  165. assert bias.ndim == 3, "the bias dimension of conv1d should be 3"
  166. bias = expand_dims(bias, 3)
  167. stride_h = stride
  168. pad_h = padding
  169. dilate_h = dilation
  170. sparse_type = "dense" if groups == 1 else "group"
  171. op = builtin.Convolution(
  172. stride_h=stride_h,
  173. stride_w=1,
  174. pad_h=pad_h,
  175. pad_w=0,
  176. dilate_h=dilate_h,
  177. dilate_w=1,
  178. strategy=get_execution_strategy(),
  179. mode=conv_mode,
  180. compute_mode=compute_mode,
  181. sparse=sparse_type,
  182. )
  183. (output,) = apply(op, inp, weight)
  184. if bias is not None:
  185. output += bias
  186. output = squeeze(output, 3)
  187. return output
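# --- Editor's illustrative sketch (not part of the original nn.py): a minimal ``conv1d``
# call with the standard (N, C, L) layout; inputs are random placeholders.
def _conv1d_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 3, 10).astype(np.float32))  # (N, C_in, L)
    w = tensor(np.random.randn(4, 3, 3).astype(np.float32))   # (C_out, C_in, K)
    y = conv1d(x, w, stride=1, padding=1)
    # L_out = (10 + 2*1 - 1*(3 - 1) - 1) // 1 + 1 = 10, so y has shape (2, 4, 10)
    return y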
  188. def conv2d(
  189. inp: Tensor,
  190. weight: Tensor,
  191. bias: Optional[Tensor] = None,
  192. stride: Union[int, Tuple[int, int]] = 1,
  193. padding: Union[int, Tuple[int, int]] = 0,
  194. dilation: Union[int, Tuple[int, int]] = 1,
  195. groups: int = 1,
  196. conv_mode="cross_correlation",
  197. compute_mode="default",
  198. ) -> Tensor:
  199. r"""2D convolution operation.
  200. Refer to :class:`~.module.Conv2d` for more information.
  201. Args:
  202. inp: feature map of the convolution operation.
  203. weight: convolution kernel.
  204. bias: bias added to the result of convolution (if given).
  205. stride: stride of the 2D convolution operation. Default: 1
  206. padding: size of the paddings added to the input on both sides of its
  207. spatial dimensions. Only zero-padding is supported. Default: 0
  208. dilation: dilation of the 2D convolution operation. Default: 1
  209. groups: number of groups into which the input and output channels are divided,
  210. so as to perform a ``grouped convolution``. When ``groups`` is not 1,
  211. ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
  212. and the shape of weight should be ``(groups, out_channel // groups,
  213. in_channels // groups, height, width)``. Default: 1
  214. conv_mode: supports "cross_correlation". Default: "cross_correlation"
  215. compute_mode: when set to "default", no special requirements will be
  216. placed on the precision of intermediate results. When set to "float32",
  217. "float32" would be used for accumulator and intermediate result, but only
  218. effective when input and output are of float16 dtype.
  219. Returns:
  220. output tensor.
  221. """
  222. assert (
  223. conv_mode.lower() == "cross_correlation"
  224. or conv_mode.name == "CROSS_CORRELATION"
  225. )
  226. if amp._enabled:
  227. compute_mode = "float32"
  228. inp, weight, bias = cast_tensors(inp, weight, bias)
  229. else:
  230. dtype = dtype_promotion(inp, weight)
  231. if inp.dtype != dtype:
  232. inp = inp.astype(dtype)
  233. if weight.dtype != dtype:
  234. weight = weight.astype(dtype)
  235. stride_h, stride_w = expand_hw(stride)
  236. pad_h, pad_w = expand_hw(padding)
  237. dilate_h, dilate_w = expand_hw(dilation)
  238. sparse_type = "dense" if groups == 1 else "group"
  239. op = builtin.Convolution(
  240. stride_h=stride_h,
  241. stride_w=stride_w,
  242. pad_h=pad_h,
  243. pad_w=pad_w,
  244. dilate_h=dilate_h,
  245. dilate_w=dilate_w,
  246. strategy=get_execution_strategy(),
  247. mode=conv_mode,
  248. compute_mode=compute_mode,
  249. sparse=sparse_type,
  250. )
  251. (output,) = apply(op, inp, weight)
  252. if bias is not None:
  253. output += bias
  254. return output
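# --- Editor's illustrative sketch (not part of the original nn.py): a minimal ``conv2d``
# call in NCHW layout with a dense (groups == 1) kernel; inputs are random placeholders.
def _conv2d_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 3, 32, 32).astype(np.float32))  # (N, C_in, H, W)
    w = tensor(np.random.randn(8, 3, 3, 3).astype(np.float32))    # (C_out, C_in, KH, KW)
    y = conv2d(x, w, stride=1, padding=1)
    # H_out = (32 + 2*1 - 1*(3 - 1) - 1) // 1 + 1 = 32, so y has shape (2, 8, 32, 32)
    return y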
  255. def conv3d(
  256. inp: Tensor,
  257. weight: Tensor,
  258. bias: Optional[Tensor] = None,
  259. stride: Union[int, Tuple[int, int, int]] = 1,
  260. padding: Union[int, Tuple[int, int, int]] = 0,
  261. dilation: Union[int, Tuple[int, int, int]] = 1,
  262. groups: int = 1,
  263. conv_mode: str = "cross_correlation",
  264. ) -> Tensor:
  265. r"""3D convolution operation.
  266. Refer to :class:`~.Conv3d` for more information.
  267. Args:
  268. inp: feature map of the convolution operation.
  269. weight: convolution kernel.
  270. bias: bias added to the result of convolution (if given).
  271. stride: stride of the 3D convolution operation. Default: 1
  272. padding: size of the paddings added to the input on both sides of its
  273. spatial dimensions. Only zero-padding is supported. Default: 0
  274. dilation: dilation of the 3D convolution operation. Default: 1
  275. groups: number of groups into which the input and output channels are divided,
  276. so as to perform a ``grouped convolution``. When ``groups`` is not 1,
  277. ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
  278. and the shape of weight should be ``(groups, out_channel // groups,
  279. in_channels // groups, depth, height, width)``. Default: 1
  280. conv_mode: supports "cross_correlation". Default: "cross_correlation"
  281. Returns:
  282. output tensor.
  283. """
  284. assert conv_mode.lower() == "cross_correlation"
  285. D, H, W = 0, 1, 2
  286. pad = _triple(padding)
  287. stride = _triple_nonzero(stride)
  288. dilate = _triple_nonzero(dilation)
  289. dtype = dtype_promotion(inp, weight)
  290. if inp.dtype != dtype:
  291. inp = inp.astype(dtype)
  292. if weight.dtype != dtype:
  293. weight = weight.astype(dtype)
  294. sparse_type = "dense" if groups == 1 else "group"
  295. op = builtin.Convolution3D(
  296. pad_d=pad[D],
  297. pad_h=pad[H],
  298. pad_w=pad[W],
  299. stride_d=stride[D],
  300. stride_h=stride[H],
  301. stride_w=stride[W],
  302. dilate_d=dilate[D],
  303. dilate_h=dilate[H],
  304. dilate_w=dilate[W],
  305. strategy=get_execution_strategy(),
  306. mode=conv_mode,
  307. sparse=sparse_type,
  308. )
  309. (output,) = apply(op, inp, weight)
  310. if bias is not None:
  311. output += bias
  312. return output
  313. def conv_transpose2d(
  314. inp: Tensor,
  315. weight: Tensor,
  316. bias: Optional[Tensor] = None,
  317. stride: Union[int, Tuple[int, int]] = 1,
  318. padding: Union[int, Tuple[int, int]] = 0,
  319. dilation: Union[int, Tuple[int, int]] = 1,
  320. groups: int = 1,
  321. conv_mode="cross_correlation",
  322. compute_mode="default",
  323. ) -> Tensor:
  324. r"""2D transposed convolution operation.
  325. Refer to :class:`~.ConvTranspose2d` for more information.
  326. Args:
  327. inp: feature map of the convolution operation.
  328. weight: convolution kernel.
  329. bias: bias added to the result of convolution (if given).
  330. stride: stride of the 2D convolution operation. Default: 1
  331. padding: size of the paddings added to the input on both sides of its
  332. spatial dimensions. Only zero-padding is supported. Default: 0
  333. dilation: dilation of the 2D convolution operation. Default: 1
  334. groups: number of groups into which the input and output channels are divided,
  335. so as to perform a ``grouped convolution``. When ``groups`` is not 1,
  336. ``in_channels`` and ``out_channels`` must be divisible by groups,
  337. and the shape of weight should be ``(groups, in_channels // groups,
  338. out_channels // groups, height, width)``. Default: 1
  339. conv_mode: supports "cross_correlation". Default: "cross_correlation"
  340. compute_mode: when set to "default", no special requirements will be
  341. placed on the precision of intermediate results. When set to "float32",
  342. "float32" would be used for accumulator and intermediate result, but only
  343. effective when input and output are of float16 dtype.
  344. Returns:
  345. output tensor.
  346. """
  347. assert (
  348. conv_mode.lower() == "cross_correlation"
  349. or conv_mode.name == "CROSS_CORRELATION"
  350. )
  351. if amp._enabled:
  352. compute_mode = "float32"
  353. inp, weight, bias = cast_tensors(inp, weight, bias)
  354. else:
  355. dtype = dtype_promotion(inp, weight)
  356. if inp.dtype != dtype:
  357. inp = inp.astype(dtype)
  358. if weight.dtype != dtype:
  359. weight = weight.astype(dtype)
  360. if groups != 1:
  361. raise NotImplementedError("group transposed conv2d is not supported yet.")
  362. stride_h, stride_w = expand_hw(stride)
  363. pad_h, pad_w = expand_hw(padding)
  364. dilate_h, dilate_w = expand_hw(dilation)
  365. op = builtin.ConvolutionBackwardData(
  366. stride_h=stride_h,
  367. stride_w=stride_w,
  368. pad_h=pad_h,
  369. pad_w=pad_w,
  370. dilate_h=dilate_h,
  371. dilate_w=dilate_w,
  372. strategy=get_execution_strategy(),
  373. compute_mode=compute_mode,
  374. )
  375. (output,) = apply(op, weight, inp)
  376. if bias is not None:
  377. output += bias
  378. return output
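# --- Editor's illustrative sketch (not part of the original nn.py): ``conv_transpose2d``
# with the (in_channels, out_channels, KH, KW) kernel layout for groups == 1; the random
# inputs are placeholders only.
def _conv_transpose2d_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 8, 16, 16).astype(np.float32))  # (N, C_in, H, W)
    w = tensor(np.random.randn(8, 4, 3, 3).astype(np.float32))    # (C_in, C_out, KH, KW)
    y = conv_transpose2d(x, w, stride=2, padding=1)
    # H_out = (16 - 1) * 2 - 2*1 + 1*(3 - 1) + 1 = 31, so y has shape (2, 4, 31, 31)
    return y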
  379. def deformable_conv2d(
  380. inp: Tensor,
  381. weight: Tensor,
  382. offset: Tensor,
  383. mask: Tensor,
  384. bias: Optional[Tensor] = None,
  385. stride: Union[int, Tuple[int, int]] = 1,
  386. padding: Union[int, Tuple[int, int]] = 0,
  387. dilation: Union[int, Tuple[int, int]] = 1,
  388. groups: int = 1,
  389. conv_mode="cross_correlation",
  390. compute_mode="default",
  391. ) -> Tensor:
  392. r"""Deformable Convolution.
  393. Args:
  394. inp: input feature map.
  395. weight: convolution kernel.
  396. offset: input offset to the kernel; the channel count of this tensor should match the deformable settings.
  397. mask: input mask to the kernel; the channel count of this tensor should match the deformable settings.
  398. bias: bias added to the result of convolution (if given).
  399. stride: stride of the 2D convolution operation. Default: 1
  400. padding: size of the paddings added to the input on both sides of its
  401. spatial dimensions. Only zero-padding is supported. Default: 0
  402. dilation: dilation of the 2D convolution operation. Default: 1
  403. groups: number of groups into which the input and output channels are divided,
  404. so as to perform a ``grouped convolution``. When ``groups`` is not 1,
  405. ``in_channels`` and ``out_channels`` must be divisible by groups,
  406. and the shape of weight should be ``(groups, out_channel // groups,
  407. in_channels // groups, height, width)``. Default: 1
  408. conv_mode: supports "cross_correlation". Default: "cross_correlation"
  409. compute_mode: when set to "default", no special requirements will be
  410. placed on the precision of intermediate results. When set to "float32",
  411. "float32" would be used for accumulator and intermediate result, but only
  412. effective when input and output are of float16 dtype.
  413. Returns:
  414. output tensor.
  415. """
  416. assert (
  417. conv_mode.lower() == "cross_correlation"
  418. or conv_mode.name == "CROSS_CORRELATION"
  419. )
  420. if amp._enabled:
  421. compute_mode = "float32"
  422. inp, weight, offset, mask, bias = cast_tensors(inp, weight, offset, mask, bias)
  423. else:
  424. offset = offset.astype("float32")
  425. mask = mask.astype("float32")
  426. stride_h, stride_w = expand_hw(stride)
  427. pad_h, pad_w = expand_hw(padding)
  428. dilate_h, dilate_w = expand_hw(dilation)
  429. sparse_type = "dense" if groups == 1 else "group"
  430. op = builtin.DeformableConv(
  431. stride_h=stride_h,
  432. stride_w=stride_w,
  433. pad_h=pad_h,
  434. pad_w=pad_w,
  435. dilate_h=dilate_h,
  436. dilate_w=dilate_w,
  437. strategy=get_execution_strategy(),
  438. mode=conv_mode,
  439. compute_mode=compute_mode,
  440. sparse=sparse_type,
  441. )
  442. (output,) = apply(op, inp, weight, offset, mask)
  443. if bias is not None:
  444. output += bias
  445. return output
  446. def local_conv2d(
  447. inp: Tensor,
  448. weight: Tensor,
  449. bias: Optional[Tensor] = None,
  450. stride: Union[int, Tuple[int, int]] = 1,
  451. padding: Union[int, Tuple[int, int]] = 0,
  452. dilation: Union[int, Tuple[int, int]] = 1,
  453. conv_mode="cross_correlation",
  454. ):
  455. r"""Applies a spatial 2D convolution over a grouped, channeled image with untied kernels."""
  456. assert (
  457. conv_mode.lower() == "cross_correlation"
  458. or conv_mode.name == "CROSS_CORRELATION"
  459. )
  460. stride_h, stride_w = expand_hw(stride)
  461. pad_h, pad_w = expand_hw(padding)
  462. dilate_h, dilate_w = expand_hw(dilation)
  463. dtype = dtype_promotion(inp, weight)
  464. if inp.dtype != dtype:
  465. inp = inp.astype(dtype)
  466. if weight.dtype != dtype:
  467. weight = weight.astype(dtype)
  468. op = builtin.GroupLocal(
  469. stride_h=stride_h,
  470. stride_w=stride_w,
  471. pad_h=pad_h,
  472. pad_w=pad_w,
  473. dilate_h=dilate_h,
  474. dilate_w=dilate_w,
  475. mode=conv_mode,
  476. sparse="dense",
  477. )
  478. (output,) = apply(op, inp, weight)
  479. if bias is not None:
  480. output += bias
  481. return output
  482. def conv_transpose3d(
  483. inp: Tensor,
  484. weight: Tensor,
  485. bias: Optional[Tensor] = None,
  486. stride: Union[int, Tuple[int, int, int]] = 1,
  487. padding: Union[int, Tuple[int, int, int]] = 0,
  488. dilation: Union[int, Tuple[int, int, int]] = 1,
  489. ) -> Tensor:
  490. r"""3D transposed convolution operation. Only supports the case where groups == 1
  491. and conv_mode == "cross_correlation".
  492. Refer to :class:`~.ConvTranspose3d` for more information.
  493. Args:
  494. inp: feature map of the convolution operation.
  495. weight: convolution kernel.
  496. weight usually has shape ``(in_channels, out_channels, depth, height, width)``.
  497. bias: bias added to the result of convolution (if given).
  498. stride: stride of the 3D convolution operation. Default: 1
  499. padding: size of the paddings added to the input on all sides of its
  500. spatial dimensions. Only zero-padding is supported. Default: 0
  501. dilation: dilation of the 3D convolution operation. Default: 1
  502. Returns:
  503. output tensor.
  504. """
  505. D, H, W = 0, 1, 2
  506. pad = _triple(padding)
  507. stride = _triple_nonzero(stride)
  508. dilate = _triple_nonzero(dilation)
  509. dtype = dtype_promotion(inp, weight)
  510. if inp.dtype != dtype:
  511. inp = inp.astype(dtype)
  512. if weight.dtype != dtype:
  513. weight = weight.astype(dtype)
  514. op = builtin.Convolution3DBackwardData(
  515. pad_d=pad[D],
  516. pad_h=pad[H],
  517. pad_w=pad[W],
  518. stride_d=stride[D],
  519. stride_h=stride[H],
  520. stride_w=stride[W],
  521. dilate_d=dilate[D],
  522. dilate_h=dilate[H],
  523. dilate_w=dilate[W],
  524. strategy=get_execution_strategy(),
  525. )
  526. (output,) = apply(op, weight, inp)
  527. if bias is not None:
  528. output += bias
  529. return output
  530. def max_pool2d(
  531. inp: Tensor,
  532. kernel_size: Union[int, Tuple[int, int]],
  533. stride: Optional[Union[int, Tuple[int, int]]] = None,
  534. padding: Union[int, Tuple[int, int]] = 0,
  535. ) -> Tensor:
  536. r"""Applies a 2D max pooling over an input tensor.
  537. Refer to :class:`~.MaxPool2d` for more information.
  538. Args:
  539. inp: input tensor.
  540. kernel_size: size of the window.
  541. stride: stride of the window. If not provided, its value is set to kernel_size.
  542. Default: None
  543. padding: implicit zero padding added on both sides. Default: 0
  544. Returns:
  545. output tensor.
  546. """
  547. if stride is None:
  548. stride = kernel_size
  549. window_h, window_w = _pair_nonzero(kernel_size)
  550. stride_h, stride_w = _pair_nonzero(stride)
  551. padding_h, padding_w = _pair(padding)
  552. op = builtin.Pooling(
  553. window_h=window_h,
  554. window_w=window_w,
  555. stride_h=stride_h,
  556. stride_w=stride_w,
  557. pad_h=padding_h,
  558. pad_w=padding_w,
  559. mode="max",
  560. )
  561. (output,) = apply(op, inp)
  562. return output
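# --- Editor's illustrative sketch (not part of the original nn.py): ``max_pool2d`` with
# the stride defaulting to the kernel size; the input is a random placeholder.
def _max_pool2d_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 3, 32, 32).astype(np.float32))
    y = max_pool2d(x, kernel_size=2)  # stride is None, so it falls back to kernel_size
    # H_out = (32 + 2*0 - 2) // 2 + 1 = 16, so y has shape (2, 3, 16, 16)
    return y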
  563. def avg_pool2d(
  564. inp: Tensor,
  565. kernel_size: Union[int, Tuple[int, int]],
  566. stride: Optional[Union[int, Tuple[int, int]]] = None,
  567. padding: Union[int, Tuple[int, int]] = 0,
  568. mode: str = "average_count_exclude_padding",
  569. ) -> Tensor:
  570. r"""Applies 2D average pooling over an input tensor.
  571. Refer to :class:`~.AvgPool2d` for more information.
  572. Args:
  573. inp: input tensor.
  574. kernel_size: size of the window.
  575. stride: stride of the window. If not provided, its value is set to ``kernel_size``.
  576. Default: None
  577. padding: implicit zero padding added on both sides. Default: 0
  578. mode: whether to include padding values in the average; set to "average" to count them.
  579. Default: "average_count_exclude_padding"
  580. Returns:
  581. output tensor.
  582. """
  583. if stride is None:
  584. stride = kernel_size
  585. window_h, window_w = _pair_nonzero(kernel_size)
  586. stride_h, stride_w = _pair_nonzero(stride)
  587. padding_h, padding_w = _pair(padding)
  588. op = builtin.Pooling(
  589. window_h=window_h,
  590. window_w=window_w,
  591. stride_h=stride_h,
  592. stride_w=stride_w,
  593. pad_h=padding_h,
  594. pad_w=padding_w,
  595. mode=mode,
  596. )
  597. (output,) = apply(op, inp)
  598. return output
  599. def adaptive_max_pool2d(
  600. inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
  601. ) -> Tensor:
  602. r"""Applies a 2D max adaptive pooling over an input.
  603. Refer to :class:`~.MaxAdaptivePool2d` for more information.
  604. Args:
  605. inp: input tensor.
  606. oshp: `(OH, OW)` size of the output shape.
  607. Returns:
  608. output tensor.
  609. """
  610. if isinstance(oshp, int):
  611. oshp = (oshp, oshp)
  612. op = builtin.AdaptivePooling(mode="max", format="NCHW",)
  613. oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
  614. (output,) = apply(op, inp, oshp)
  615. return output
  616. def adaptive_avg_pool2d(
  617. inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
  618. ) -> Tensor:
  619. r"""Applies a 2D average adaptive pooling over an input.
  620. Refer to :class:`~.AvgAdaptivePool2d` for more information.
  621. Args:
  622. inp: input tensor.
  623. oshp: `(OH, OW)` size of the output shape.
  624. Returns:
  625. output tensor.
  626. """
  627. if isinstance(oshp, int):
  628. oshp = (oshp, oshp)
  629. op = builtin.AdaptivePooling(mode="average", format="NCHW",)
  630. oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
  631. (output,) = apply(op, inp, oshp)
  632. return output
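# --- Editor's illustrative sketch (not part of the original nn.py): adaptive pooling
# produces exactly the requested output spatial size, whatever the input resolution.
def _adaptive_avg_pool2d_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 3, 17, 31).astype(np.float32))
    y = adaptive_avg_pool2d(x, (7, 7))
    # y has shape (2, 3, 7, 7)
    return y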
  633. def deformable_psroi_pooling(
  634. inp: Tensor,
  635. rois: Tensor,
  636. trans: Tensor,
  637. no_trans: bool,
  638. part_size: int,
  639. pooled_h: int,
  640. pooled_w: int,
  641. sample_per_part: int,
  642. spatial_scale: float,
  643. trans_std: float = 0.1,
  644. ):
  645. r"""Deformable PSROI(Position Sensitive Region of Interest) Pooling.
  646. Args:
  647. inp: input feature map.
  648. rois: the rois for feature pooling.
  649. trans: input offset to psroi_pooling.
  650. no_trans: indicates the phase of DeformablePSROIPooling; False for the
  651. 1st phase, True for the 2nd phase.
  652. part_size: part size.
  653. sample_per_part: sample points of each part.
  654. pooled_h, pooled_w: shape of the pooled output.
  655. spatial_scale: the spatial_scale w.r.t input image.
  656. trans_std: multiplier used in 2nd phase.
  657. """
  658. op = builtin.DeformablePSROIPooling(
  659. no_trans=no_trans,
  660. part_size=part_size,
  661. pooled_h=pooled_h,
  662. pooled_w=pooled_w,
  663. sample_per_part=sample_per_part,
  664. spatial_scale=spatial_scale,
  665. trans_std=trans_std,
  666. )
  667. output, _ = apply(op, inp, rois, trans)
  668. return output
  669. def hswish(x):
  670. r"""Element-wise `x * relu6(x + 3) / 6`.
  671. Example:
  672. .. testcode::
  673. import numpy as np
  674. from megengine import tensor
  675. import megengine.functional as F
  676. x = tensor(np.arange(5).astype(np.float32))
  677. out = F.hswish(x)
  678. print(out.numpy().round(decimals=4))
  679. .. testoutput::
  680. [0. 0.6667 1.6667 3. 4. ]
  681. """
  682. return _elwise(x, mode=Elemwise.Mode.H_SWISH)
  683. def sigmoid(x):
  684. r"""Element-wise `1 / ( 1 + exp( -x ) )`."""
  685. return _elwise(x, mode=Elemwise.Mode.SIGMOID)
  686. def hsigmoid(x):
  687. r"""Element-wise `relu6(x + 3) / 6`."""
  688. return relu6(x + 3) / 6
  689. def relu(x):
  690. r"""Element-wise `max(x, 0)`."""
  691. return _elwise(x, mode=Elemwise.Mode.RELU)
  692. def relu6(x):
  693. r"""Element-wise `min(max(x, 0), 6)`."""
  694. return minimum(maximum(x, 0), 6)
  695. def prelu(inp: Tensor, weight: Tensor) -> Tensor:
  696. r"""Element-wise PReLU function.
  697. Refer to :class:`~.PReLU` for more information.
  698. """
  699. return maximum(inp, 0) + weight * minimum(inp, 0)
  700. def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
  701. r"""Element-wise LeakyReLU function.
  702. Refer to :class:`~.LeakyReLU` for more information.
  703. """
  704. return maximum(inp, 0) + negative_slope * minimum(inp, 0)
  705. def silu(x):
  706. r"""Applies the element-wise Sigmoid Linear Unit function, i.e. `x * sigmoid(x)`."""
  707. return _elwise(x, mode=Elemwise.Mode.SILU)
  708. def gelu(x):
  709. r"""Applies the element-wise function:
  710. .. math::
  711. \text{gelu}(x) = x\Phi(x)
  712. where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
  713. """
  714. return _elwise(x, mode=Elemwise.Mode.GELU)
  715. def softplus(inp: Tensor) -> Tensor:
  716. r"""Applies the element-wise function:
  717. .. math::
  718. \text{softplus}(x) = \log(1 + \exp(x))
  719. softplus is a smooth approximation to the ReLU function and can be used
  720. to constrain the output to be always positive.
  721. For numerical stability the implementation follows this transformation:
  722. .. math::
  723. \text{softplus}(x) = \log(1 + \exp(x))
  724. = \log(1 + \exp(-\text{abs}(x))) + \max(x, 0)
  725. = \log1p(\exp(-\text{abs}(x))) + \text{relu}(x)
  726. Examples:
  727. .. testcode::
  728. import numpy as np
  729. from megengine import tensor
  730. import megengine.functional as F
  731. x = tensor(np.arange(-3, 3, dtype=np.float32))
  732. y = F.softplus(x)
  733. print(y.numpy().round(decimals=4))
  734. Outputs:
  735. .. testoutput::
  736. [0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]
  737. """
  738. return log1p(exp(-abs(inp))) + relu(inp)
  739. def logsoftmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
  740. r"""Applies the :math:`\log(\text{softmax}(x))` function to an n-dimensional
  741. input tensor. The :math:`\text{logsoftmax}(x)` formulation can be simplified as:
  742. .. math::
  743. \text{logsoftmax}(x_{i}) = \log(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} )
  744. For numerical stability the implementation follows this transformation:
  745. .. math::
  746. \text{logsoftmax}(x)
  747. = \log (\frac{\exp (x)}{\sum_{i}(\exp (x_{i}))})
  748. = x - \log (\sum_{i}(\exp (x_{i})))
  749. = x - \text{logsumexp}(x)
  750. Examples:
  751. .. testcode::
  752. import numpy as np
  753. from megengine import tensor
  754. import megengine.functional as F
  755. x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
  756. y = F.logsoftmax(x, axis=1)
  757. print(y.numpy().round(decimals=4))
  758. Outputs:
  759. .. testoutput::
  760. [[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
  761. [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]
  762. """
  763. return inp - logsumexp(inp, axis, keepdims=True)
  764. def logsigmoid(inp: Tensor) -> Tensor:
  765. r"""Applies the element-wise function:
  766. .. math::
  767. \text{logsigmoid}(x) = \log(\frac{ 1 }{ 1 + \exp(-x)})
  768. = \log(1/(1 + \exp(-x)))
  769. = - \log(1 + \exp(-x))
  770. = - \text{softplus}(-x)
  771. Examples:
  772. .. testcode::
  773. import numpy as np
  774. from megengine import tensor
  775. import megengine.functional as F
  776. x = tensor(np.arange(-5, 5, dtype=np.float32))
  777. y = F.logsigmoid(x)
  778. print(y.numpy().round(decimals=4))
  779. Outputs:
  780. .. testoutput::
  781. [-5.0067 -4.0182 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
  782. -0.0181]
  783. """
  784. return -softplus(-inp)
  785. def logsumexp(
  786. inp: Tensor, axis: Union[int, Sequence[int]], keepdims: bool = False
  787. ) -> Tensor:
  788. r"""Calculates the logarithm of the inputs' exponential sum along the given :attr:`axis`.
  789. .. math::
  790. \text{logsumexp}(x)= \log \sum_{j=1}^{n} \exp \left(x_{j}\right)
  791. For numerical stability, the implementation follows this transformation:
  792. .. math::
  793. \text{logsumexp}(x)= \log \sum_{j=1}^{n} \exp \left(x_{j}\right)
  794. = b + \log \sum_{j=1}^{n} \exp \left(x_{j}-b\right)
  795. where
  796. .. math::
  797. b = \max(x_j)
  798. Examples:
  799. .. testcode::
  800. import numpy as np
  801. from megengine import tensor
  802. import megengine.functional as F
  803. x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
  804. y = F.logsumexp(x, axis=1, keepdims=False)
  805. print(y.numpy().round(decimals=4))
  806. Outputs:
  807. .. testoutput::
  808. [-0.5481 4.4519]
  809. """
  810. max_value = max(inp.detach(), axis, keepdims=True)
  811. if keepdims:
  812. return max_value + log(sum(exp(inp - max_value), axis, keepdims))
  813. else:
  814. return squeeze(max_value, axis=None) + log(
  815. sum(exp(inp - max_value), axis, keepdims)
  816. )
  817. def _get_softmax_axis(ndim: int) -> int:
  818. if ndim in (0, 1, 3):
  819. return 0
  820. return 1
  821. def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
  822. r"""Applies a :math:`\text{softmax}(x)` function. :math:`\text{softmax}(x)` is defined as:
  823. .. math::
  824. \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
  825. It is applied to all elements along axis, and rescales elements so that
  826. they stay in the range `[0, 1]` and sum to 1.
  827. See :class:`~.module.Softmax` for more details.
  828. Examples:
  829. .. testcode::
  830. import numpy as np
  831. from megengine import tensor
  832. import megengine.functional as F
  833. x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
  834. out = F.softmax(x)
  835. print(out.numpy().round(decimals=4))
  836. Outputs:
  837. .. testoutput::
  838. [[0.0117 0.0317 0.0861 0.2341 0.6364]
  839. [0.0117 0.0317 0.0861 0.2341 0.6364]]
  840. """
  841. if axis is None:
  842. axis = _get_softmax_axis(len(inp.shape))
  843. offset = inp.max(axis=axis, keepdims=True).detach()
  844. cached = exp(inp - offset)
  845. down = sum(cached, axis=axis, keepdims=True)
  846. return cached / down
  847. @lru_cache(maxsize=None)
  848. def _get_layerNorm(device, dtype, dim, gopt_level=2):
  849. @subgraph("LayerNormAffine", dtype, device, 5, gopt_level=gopt_level)
  850. def layerNormAffine(inputs, f, c):
  851. inp, eps, _flatten_shape, weight, bias = inputs
  852. inp_shape = f(GetVarShape(), inp)
  853. inp = f(Reshape(axis=dim), inp, _flatten_shape)
  854. mean = f(Reduce(mode="mean", axis=-1), inp)
  855. x2s = f(Reduce(mode="sum_sqr", axis=-1), inp)
  856. reduce_shape = f(GetVarShape(), x2s)
  857. reduce_size = f(
  858. "//",
  859. f(Reduce(mode="product", axis=0), inp_shape),
  860. f(Reduce(mode="product", axis=0), reduce_shape),
  861. )
  862. reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
  863. var = f("-", f("/", x2s, reduce_size_f), f("**", mean, c(2)))
  864. inv_sqrt_var = f("**", f("+", var, eps), c(-0.5))
  865. oup = f("fma3", inp, inv_sqrt_var, f("*", f("-", mean), inv_sqrt_var))
  866. affine_oup = f(Reshape(), oup, inp_shape)
  867. affine_oup = f("fma3", affine_oup, weight, bias)
  868. # NOTE: returning oup makes backward faster but takes more memory
  869. return (affine_oup, oup, mean, x2s), (True, False, False, False)
  870. @subgraph("LayerNorm", dtype, device, 3, gopt_level=gopt_level)
  871. def layerNorm(inputs, f, c):
  872. inp, eps, _flatten_shape = inputs
  873. inp_shape = f(GetVarShape(), inp)
  874. inp = f(Reshape(axis=dim), inp, _flatten_shape)
  875. mean = f(Reduce(mode="mean", axis=-1), inp)
  876. x2s = f(Reduce(mode="sum_sqr", axis=-1), inp)
  877. reduce_shape = f(GetVarShape(), x2s)
  878. reduce_size = f(
  879. "//",
  880. f(Reduce(mode="product", axis=0), inp_shape),
  881. f(Reduce(mode="product", axis=0), reduce_shape),
  882. )
  883. reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
  884. var = f("-", f("/", x2s, reduce_size_f), f("**", mean, c(2)))
  885. inv_sqrt_var = f("**", f("+", var, eps), c(-0.5))
  886. oup = f("fma3", inp, inv_sqrt_var, f("*", f("-", mean), inv_sqrt_var))
  887. oup = f(Reshape(), oup, inp_shape)
  888. return (oup,), (True,)
  889. return (layerNorm, layerNormAffine)
  890. def layer_norm(
  891. inp: Tensor,
  892. normalized_shape: tuple,
  893. affine: bool,
  894. weight: Optional[Tensor] = None,
  895. bias: Optional[Tensor] = None,
  896. eps: float = 1e-5,
  897. eps_mode="additive",
  898. ):
  899. assert eps_mode.lower() in {"max", "additive"}, "unknown eps_mode: {}".format(
  900. eps_mode
  901. )
  902. _device = inp.device
  903. _dtype = inp.dtype
  904. _dim = len(inp.shape) - len(normalized_shape)
  905. _flatten_shape = concat(
  906. (
  907. convert_single_value(inp.shape[:_dim], dtype="int32", device=inp.device),
  908. convert_single_value(-1, dtype="int32", device=inp.device),
  909. )
  910. )
  911. (layerNorm, layerNormAffine) = _get_layerNorm(_device, _dtype, _dim)
  912. eps = convert_single_value(eps, dtype=inp.dtype, device=inp.device)
  913. if affine:
  914. outvar, *_ = apply(layerNormAffine(), inp, eps, _flatten_shape, weight, bias)
  915. else:
  916. outvar, *_ = apply(layerNorm(), inp, eps, _flatten_shape)
  917. return outvar
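# --- Editor's illustrative sketch (not part of the original nn.py): ``layer_norm`` over
# the trailing dimension without learnable affine parameters; the input is a placeholder.
def _layer_norm_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 4, 8).astype(np.float32))
    y = layer_norm(x, normalized_shape=(8,), affine=False)
    # y has the same shape as x, (2, 4, 8); mean/variance are taken over the last axis
    return y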
  918. def batch_norm(
  919. inp: Tensor,
  920. running_mean: Tensor = None,
  921. running_var: Tensor = None,
  922. weight: Optional[Tensor] = None,
  923. bias: Optional[Tensor] = None,
  924. *,
  925. training: bool = False,
  926. momentum: float = 0.9,
  927. eps: float = 1e-5,
  928. inplace: bool = True,
  929. compute_mode="default"
  930. ):
  931. r"""Applies batch normalization to the input.
  932. Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
  933. Args:
  934. inp: input tensor.
  935. running_mean: tensor to store running mean.
  936. running_var: tensor to store running variance.
  937. weight: scaling tensor in the learnable affine parameters.
  938. See :math:`\gamma` in :class:`~.BatchNorm2d`.
  939. bias: bias tensor in the learnable affine parameters.
  940. See :math:`\beta` in :class:`~.BatchNorm2d`.
  941. training: a boolean value to indicate whether batch norm is performed
  942. in training mode. Default: False
  943. momentum: value used for the ``running_mean`` and ``running_var``
  944. computation. Default: 0.9
  945. eps: a value added to the denominator for numerical stability. Default: 1e-5
  946. inplace: whether to update ``running_mean`` and ``running_var``
  947. inplace or return new tensors. Default: True
  948. """
  949. if inp.ndim != 4:
  950. raise NotImplementedError("batch_norm for ndim != 4")
  951. C = inp.shape[1]
  952. def make_full_if_none(x, value):
  953. if x is None:
  954. (x,) = Const(value, dtype=inp.dtype, device=inp.device)()
  955. shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
  956. (result,) = apply(builtin.Broadcast(), x, shape)
  957. return result
  958. elif x.ndim == 1:
  959. shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
  960. (result,) = apply(builtin.Reshape(), x, shape)
  961. return result
  962. return x
  963. has_mean = running_mean is not None
  964. has_var = running_var is not None
  965. if not training:
  966. assert has_mean, "running_mean must be provided in inference mode"
  967. assert has_var, "running_var must be provided in inference mode"
  968. if has_mean and running_mean.ndim != 4:
  969. raise ValueError
  970. if has_var and running_var.ndim != 4:
  971. raise ValueError
  972. if amp._enabled:
  973. inp = inp.astype("float16")
  974. weight, bias, running_mean, running_var = cast_tensors(
  975. weight, bias, running_mean, running_var, promote=True
  976. )
  977. weight = make_full_if_none(weight, 1)
  978. bias = make_full_if_none(bias, 0)
  979. if not training:
  980. op = builtin.BatchNorm(
  981. fwd_mode=BatchNorm.FwdMode.INFERENCE, epsilon=eps, param_dim="dim_1c11"
  982. )
  983. ret = apply(op, inp, weight, bias, running_mean, running_var)[-1]
  984. return ret
  985. else:
  986. op = builtin.BatchNorm(
  987. avg_factor=1 - momentum, epsilon=eps, param_dim="dim_1c11"
  988. )
  989. if has_mean or has_var:
  990. running_mean = make_full_if_none(running_mean, 0)
  991. running_var = make_full_if_none(running_var, 1)
  992. new_mean, new_var, _, _, inp = apply(
  993. op, inp, weight, bias, running_mean, running_var
  994. )
  995. if not has_mean:
  996. new_mean = None
  997. if not has_var:
  998. new_var = None
  999. if inplace:
  1000. if has_mean:
  1001. running_mean[...] = new_mean
  1002. if has_var:
  1003. running_var[...] = new_var
  1004. return inp
  1005. else:
  1006. return inp, new_mean, new_var
  1007. else:
  1008. (_, _, inp,) = apply(op, inp, weight, bias)
  1009. return inp
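# --- Editor's illustrative sketch (not part of the original nn.py): inference-mode
# ``batch_norm`` on an NCHW tensor. Note that the running statistics must already be
# 4-dimensional with shape (1, C, 1, 1), per the checks above; inputs are placeholders.
def _batch_norm_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))
    mean = tensor(np.zeros((1, 3, 1, 1), dtype=np.float32))
    var = tensor(np.ones((1, 3, 1, 1), dtype=np.float32))
    y = batch_norm(x, mean, var, training=False)
    # y has the same shape as x, (2, 3, 4, 4)
    return y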
  1010. @lru_cache(maxsize=None)
  1011. def _get_sync_bn_ops(device, dtype, eps_mode, ndim, channels):
  1012. # fmt: off
  1013. @subgraph("SyncBnStage0", dtype, device, 1)
  1014. def syncbn_stage0(inputs, f, c):
  1015. input = inputs[0]
  1016. reduce_shape = c((1, channels) + (1,) * (ndim - 2), dtype="int32", device=device)
  1017. input_shape = f(GetVarShape(), input)
  1018. input_elems = f(Reduce(mode="product", axis=0), input_shape)
  1019. reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
  1020. reduce_size = f("//", input_elems, reduce_elems)
  1021. channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
  1022. channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
  1023. reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
  1024. return (reduce_shape, reduce_size_f, channel_x1s, channel_x2s), (False, False, True, True)
  1025. @subgraph("SyncBnStage1", dtype, device, 7)
  1026. def syncbn_stage1(inputs, f, c):
  1027. input, reduce_size, channel_x1s, channel_x2s, eps = inputs[0:5]
  1028. weight, bias = inputs[5:7]
  1029. channel_mean = f("/", channel_x1s, reduce_size)
  1030. channel_var =\
  1031. f("+", f("/", f("**", channel_x1s, c(2)),
  1032. f("-", f("*", reduce_size, reduce_size))),
  1033. f("/", channel_x2s, reduce_size))
  1034. invsqrt_channel_var = f("**", f(eps_mode, channel_var, eps), c(-0.5))
  1035. inv_var_wt = f("*", invsqrt_channel_var, weight)
  1036. neg_channel_mean = f("-", channel_mean)
  1037. outvar =\
  1038. f("fma3", input, inv_var_wt,
  1039. f("+", f("*", neg_channel_mean, inv_var_wt),
  1040. bias))
  1041. return (outvar, channel_mean, channel_var, inv_var_wt), (True, False, False, False)
  1042. @subgraph("SyncBnStage1Inference", dtype, device, 6)
  1043. def syncbn_stage1_inference(inputs, f, c):
  1044. input, channel_mean, channel_var, eps = inputs[0:4]
  1045. weight, bias = inputs[4:6]
  1046. invsqrt_channel_var = f("**", f(eps_mode, channel_var, eps), c(-0.5))
  1047. inv_var_wt = f("*", invsqrt_channel_var, weight)
  1048. neg_channel_mean = f("-", channel_mean)
  1049. outvar =\
  1050. f("+", f("*", input, inv_var_wt),
  1051. f("+", f("*", neg_channel_mean, inv_var_wt),
  1052. bias))
  1053. return (outvar,), (True,)
  1054. @subgraph("SyncBnStage2", dtype, device, 7)
  1055. def syncbn_stage2(inputs, f, c):
  1056. running_mean, running_var, momentum = inputs[0:3]
  1057. reduce_size, channel_x1s, channel_x2s, channel_mean = inputs[3:7]
  1058. c1_minus_momentum = f("-", c(1), momentum)
  1059. reduce_size_minus_c1 = f("-", reduce_size, c(1))
  1060. running_mean = f("fma4",
  1061. running_mean, momentum,
  1062. c1_minus_momentum, channel_mean,
  1063. )
  1064. channel_variance_unbiased =\
  1065. f("+", f("/", f("**", channel_x1s, c(2)),
  1066. f("*", f("-", reduce_size),
  1067. reduce_size_minus_c1)),
  1068. f("/", channel_x2s,
  1069. reduce_size_minus_c1))
  1070. running_var = f("fma4",
  1071. running_var, momentum,
  1072. c1_minus_momentum, channel_variance_unbiased
  1073. )
  1074. return (running_mean, running_var), (True, True)
  1075. @subgraph("SyncBnConcatStats", dtype, device, 3)
  1076. def syncbn_concat_stats(inputs, f, c):
  1077. reduce_size, channel_x1s, channel_x2s = inputs[0:3]
  1078. reduce_size = f(builtin.Broadcast(), reduce_size, c([1]*ndim, dtype="int32"))
  1079. stats = f(builtin.Concat(axis=1, comp_node=device), reduce_size, channel_x1s, channel_x2s)
  1080. return (stats,), (True,)
  1081. @subgraph("SyncBnSplitStats", dtype, device, 1)
  1082. def syncbn_split_stats(inputs, f, c):
  1083. stats = inputs[0]
  1084. c_1 = c(1, dtype="int32")
  1085. channel_x1s_end = c(channels+1, dtype="int32")
  1086. def _subtensor(src, axis, begin, end):
  1087. items = (axis, (begin is not None), (end is not None), False, False),
  1088. args = ()
  1089. if begin is not None:
  1090. args += begin,
  1091. if end is not None:
  1092. args += end,
  1093. return f(builtin.Subtensor(items=items), src, *args)
  1094. reduce_size = _subtensor(stats, 1, None, c_1)
  1095. channel_x1s = _subtensor(stats, 1, c_1, channel_x1s_end)
  1096. channel_x2s = _subtensor(stats, 1, channel_x1s_end, None)
  1097. reduce_size = f(builtin.Reshape(), reduce_size, c_1)
  1098. return (reduce_size, channel_x1s, channel_x2s), (False, True, True)
  1099. # fmt: on
  1100. return (
  1101. syncbn_stage0,
  1102. syncbn_stage1,
  1103. syncbn_stage1_inference,
  1104. syncbn_stage2,
  1105. syncbn_concat_stats,
  1106. syncbn_split_stats,
  1107. )
  1108. def sync_batch_norm(
  1109. inp: Tensor,
  1110. running_mean: Tensor,
  1111. running_var: Tensor,
  1112. weight: Optional[Tensor] = None,
  1113. bias: Optional[Tensor] = None,
  1114. training: bool = False,
  1115. momentum: Union[float, Tensor] = 0.9,
  1116. eps: float = 1e-5,
  1117. eps_mode="additive",
  1118. group=WORLD,
  1119. ) -> Tensor:
  1120. r"""Applies synchronized batch normalization to the input.
  1121. Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.
  1122. Args:
  1123. inp: input tensor.
  1124. running_mean: tensor to store running mean.
  1125. running_var: tensor to store running variance.
  1126. weight: scaling tensor in the learnable affine parameters.
  1127. See :math:`\gamma` in :class:`~.BatchNorm2d`.
  1128. bias: bias tensor in the learnable affine parameters.
  1129. See :math:`\beta` in :class:`~.BatchNorm2d`.
  1130. training: a boolean value to indicate whether batch norm is performed
  1131. in training mode. Default: False
  1132. momentum: value used for the ``running_mean`` and ``running_var``
  1133. computation. Default: 0.9
  1134. eps: a value added to the denominator for numerical stability.
  1135. Default: 1e-5
  1136. eps_mode: mode of calculation for eps, "max" or "additive".
  1137. Default: "additive"
  1138. group: communication group; mean and variance are calculated within this group.
  1139. Default: :obj:`~megengine.distributed.WORLD`
  1140. """
  1141. _eps_mode = eps_mode.lower()
  1142. assert _eps_mode in {"max", "additive"}, "unknown eps_mode: {}".format(eps_mode)
  1143. if _eps_mode == "additive" and not (is_distributed() and training):
  1144. return batch_norm(
  1145. inp,
  1146. running_mean,
  1147. running_var,
  1148. weight,
  1149. bias,
  1150. training=training,
  1151. momentum=momentum,
  1152. eps=eps,
  1153. )
  1154. _channels = make_shape_tuple(inp.shape)[1]
  1155. _ndim = inp.ndim
  1156. _device = inp.device
  1157. _dtype = inp.dtype
  1158. if _ndim != 4:
  1159. raise NotImplementedError("sync_batch_norm for ndim != 4")
  1160. def _make_full_if_none(x, value):
  1161. if x is None:
  1162. (x,) = Const(value, dtype=inp.dtype, device=_device)()
  1163. (result,) = apply(builtin.Broadcast(), x, reduce_shape)
  1164. return result
  1165. elif x.ndim == 1:
  1166. (result,) = apply(builtin.Reshape(), x, reduce_shape)
  1167. return result
  1168. return x
  1169. (
  1170. syncbn_stage0,
  1171. syncbn_stage1,
  1172. syncbn_stage1_inference,
  1173. syncbn_stage2,
  1174. syncbn_concat_stats,
  1175. syncbn_split_stats,
  1176. ) = _get_sync_bn_ops(_device, _dtype, eps_mode, _ndim, _channels)
  1177. reduce_shape, reduce_size, channel_x1s, channel_x2s = apply(syncbn_stage0(), inp)
  1178. eps = convert_single_value(eps, dtype=inp.dtype, device=inp.device)
  1179. weight = _make_full_if_none(weight, 1)
  1180. bias = _make_full_if_none(bias, 0)
  1181. if training:
  1182. if is_distributed():
  1183. # reduce all nodes' data to calculate mean and variance
  1184. (stat,) = apply(
  1185. syncbn_concat_stats(), reduce_size, channel_x1s, channel_x2s
  1186. )
  1187. stat = all_reduce_sum(stat, group)
  1188. reduce_size, channel_x1s, channel_x2s = apply(syncbn_split_stats(), stat)
  1189. outvar, channel_mean, *_ = apply(
  1190. syncbn_stage1(),
  1191. inp,
  1192. reduce_size,
  1193. channel_x1s,
  1194. channel_x2s,
  1195. eps,
  1196. weight,
  1197. bias,
  1198. )
  1199. else:
  1200. assert running_var is not None and running_mean is not None
  1201. channel_mean = running_mean
  1202. channel_var = running_var
  1203. outvar, *_ = apply(
  1204. syncbn_stage1_inference(), inp, channel_mean, channel_var, eps, weight, bias
  1205. )
  1206. # outvar = output * weight + bias
  1207. # where output = inp * invsqrt_channel_variance + (
  1208. # -channel_mean * invsqrt_channel_variance
  1209. # )
  1210. # Manually expand output for gopt
  1211. if training and running_var is not None and running_mean is not None:
  1212. momentum = convert_single_value(momentum, dtype=inp.dtype, device=inp.device)
  1213. running_mean[...], running_var[...] = apply(
  1214. syncbn_stage2(),
  1215. running_mean,
  1216. running_var,
  1217. momentum,
  1218. reduce_size,
  1219. channel_x1s,
  1220. channel_x2s,
  1221. channel_mean,
  1222. )
  1223. return outvar
  1224. def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
  1225. r"""Returns a new tensor where each element is randomly set to zero
  1226. with probability P = ``drop_prob``. The output tensor is optionally rescaled if ``training`` is True.
  1227. Args:
  1228. inp: input tensor.
  1229. drop_prob: probability to drop (set to zero) a single element.
  1230. training: the default behavior of ``dropout`` during training is to rescale the output,
  1231. so that it can be replaced by an :class:`~.Identity` during inference. Default: True
  1232. Returns:
  1233. the output tensor.
  1234. Examples:
  1235. .. testcode::
  1236. import numpy as np
  1237. from megengine import tensor
  1238. import megengine.functional as F
  1239. # test training mode
  1240. data = tensor(np.ones(10000000, dtype=np.float32))
  1241. out = F.nn.dropout(data, 1.0 / 3.0, training=True)
  1242. assert not out.numpy().all()
  1243. # test eval mode
  1244. out = F.nn.dropout(data, 1.0 / 3.0, training=False)
  1245. assert out.numpy().all()
  1246. Outputs:
  1247. .. testoutput::
  1248. :options: +SKIP
  1249. [1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
  1250. """
  1251. assert 0 <= drop_prob < 1
  1252. if not training or drop_prob == 0:
  1253. return inp
  1254. # model in training mode, e.g. model.train()
  1255. rv = uniform(size=inp.shape)
  1256. mask = rv > drop_prob
  1257. ret = inp * mask.astype(inp.dtype)
  1258. ret *= 1 / (1 - drop_prob)
  1259. return ret
  1260. def one_hot(inp: Tensor, num_classes: int) -> Tensor:
  1261. r"""Performs one-hot encoding for the input tensor.
  1262. Args:
  1263. inp: input tensor.
  1264. num_classes: number of classes denotes the last dimension of the output tensor.
  1265. Examples:
  1266. .. testcode::
  1267. import numpy as np
  1268. from megengine import tensor
  1269. import megengine.functional as F
  1270. x = tensor(np.arange(1, 4, dtype=np.int32))
  1271. out = F.one_hot(x, num_classes=4)
  1272. print(out.numpy())
  1273. Outputs:
  1274. .. testoutput::
  1275. [[0 1 0 0]
  1276. [0 0 1 0]
  1277. [0 0 0 1]]
  1278. """
  1279. zeros_tensor = zeros(list(inp.shape) + [num_classes], inp.dtype, inp.device)
  1280. ones_tensor = ones(list(inp.shape) + [1], inp.dtype, inp.device)
  1281. op = builtin.IndexingSetOneHot(axis=inp.ndim)
  1282. (result,) = apply(op, zeros_tensor, inp, ones_tensor)
  1283. return result
  1284. def embedding(
  1285. inp: Tensor,
  1286. weight: Tensor,
  1287. padding_idx: Optional[int] = None,
  1288. max_norm: Optional[float] = None,
  1289. norm_type: Optional[float] = None,
  1290. ):
  1291. r"""Applies lookup table for embedding.
  1292. Args:
  1293. inp: tensor with indices.
  1294. weight: learnable weights which embeds from.
  1295. padding_idx: should be set to None, not supported now.
  1296. max_norm: should be set to None, not supported now.
  1297. norm_type: should be set to None, not supported now.
  1298. Refer to :class:`~.module.Embedding` for more information.
  1299. """
  1300. if padding_idx is not None:
  1301. raise ValueError("padding_idx is not supported now!")
  1302. if max_norm is not None or norm_type is not None:
  1303. raise ValueError("weight normalization is not supported now!")
  1304. dest_shp = list(inp.shape) + [weight.shape[-1]]
  1305. return weight[inp.reshape(-1)].reshape(dest_shp)
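# --- Editor's illustrative sketch (not part of the original nn.py): ``embedding`` is a
# lookup, so the output appends the embedding dimension to the index shape. Placeholders only.
def _embedding_usage_example():
    import numpy as np
    from megengine import tensor
    indices = tensor(np.array([[0, 2], [1, 3]], dtype=np.int32))  # (2, 2) indices
    table = tensor(np.random.randn(10, 4).astype(np.float32))     # (num_embeddings, embedding_dim)
    y = embedding(indices, table)
    # y has shape (2, 2, 4): each index is replaced by its 4-d embedding vector
    return y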
  1306. def indexing_one_hot(
  1307. src: Tensor, index: Tensor, axis: int = 1, keepdims=False
  1308. ) -> Tensor:
  1309. r"""One-hot indexing for some axes.
  1310. Args:
  1311. src: input tensor.
  1312. index: index tensor.
  1313. axis: axis on src for which values in index index. Default: 1
  1314. keepdims: whether not to remove the axis in result. Default: False
  1315. Examples:
  1316. .. testcode::
  1317. import megengine.functional as F
  1318. from megengine import tensor
  1319. src = tensor([[1.0, 2.0]])
  1320. index = tensor([0])
  1321. val = F.indexing_one_hot(src, index)
  1322. print(val.numpy())
  1323. Outputs:
  1324. .. testoutput::
  1325. [1.]
  1326. """
  1327. assert isinstance(src, Tensor), "src must be of Tensor type"
  1328. op = builtin.IndexingOneHot(axis=axis)
  1329. index = convert_single_value(index, dtype="int32", device=src.device)
  1330. (result,) = apply(op, src, index)
  1331. if not keepdims:
  1332. result = squeeze(result, axis)
  1333. return result
  1334. def sliding_window(
  1335. inp: Tensor,
  1336. kernel_size: Union[int, Tuple[int, int]],
  1337. padding: Union[int, Tuple[int, int]] = 0,
  1338. stride: Union[int, Tuple[int, int]] = 1,
  1339. dilation: Union[int, Tuple[int, int]] = 1,
  1340. ) -> Tensor:
  1341. r"""Extracts sliding local blocks from a batched input tensor.
  1342. Refer to :class:`~.SlidingWindow` for more information.
  1343. Args:
  1344. inp: input tensor.
  1345. kernel_size: size of the window.
  1346. padding: implicit zero padding added on both sides of input. Default: 0
  1347. stride: stride of the window. Default: 1
  1348. dilation: dilation of the window. Default: 1
  1349. """
  1350. padding_h, padding_w = _pair(padding)
  1351. stride_h, stride_w = _pair_nonzero(stride)
  1352. dilation_h, dilation_w = _pair_nonzero(dilation)
  1353. window_h, window_w = _pair_nonzero(kernel_size)
  1354. op = builtin.Images2Neibs(
  1355. pad_h=padding_h,
  1356. pad_w=padding_w,
  1357. stride_h=stride_h,
  1358. stride_w=stride_w,
  1359. dilate_h=dilation_h,
  1360. dilate_w=dilation_w,
  1361. window_h=window_h,
  1362. window_w=window_w,
  1363. )
  1364. (output,) = apply(op, inp)
  1365. return output
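# --- Editor's illustrative sketch (not part of the original nn.py): extracting 3x3 patches
# with ``sliding_window``. The per-location patch layout is my reading of the op and should
# be treated as an assumption; inputs are placeholders.
def _sliding_window_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.random.randn(1, 1, 6, 6).astype(np.float32))
    y = sliding_window(x, kernel_size=3, stride=1)
    # (6 - 3) // 1 + 1 = 4 output positions per axis; y is expected to have
    # shape (1, 1, 4, 4, 3, 3), i.e. one 3x3 window per output location
    return y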
  1366. def sliding_window_transpose(
  1367. inp: Tensor,
  1368. output_size: Union[int, Tuple[int, int]],
  1369. kernel_size: Union[int, Tuple[int, int]],
  1370. padding: Union[int, Tuple[int, int]] = 0,
  1371. stride: Union[int, Tuple[int, int]] = 1,
  1372. dilation: Union[int, Tuple[int, int]] = 1,
  1373. ) -> Tensor:
  1374. r"""Sum over the sliding windows on the corresponding input location.
  1375. Refer to :class:`~.SlidingWindowTranspose` for more information.
  1376. Args:
  1377. inp: input tensor.
  1378. output_size: shape of output tensor.
  1379. kernel_size: size of the window.
  1380. padding: implicit zero padding added on both sides of input. Default: 0
  1381. stride: stride of the window. Default: 1
  1382. dilation: dilation of the window. Default: 1
  1383. """
  1384. output_h, output_w = _pair_nonzero(output_size)
  1385. padding_h, padding_w = _pair(padding)
  1386. stride_h, stride_w = _pair_nonzero(stride)
  1387. dilation_h, dilation_w = _pair_nonzero(dilation)
  1388. window_h, window_w = _pair_nonzero(kernel_size)
  1389. expected_h = (
  1390. output_h + 2 * padding_h - dilation_h * (window_h - 1) - 1
  1391. ) // stride_h + 1
  1392. expected_w = (
  1393. output_w + 2 * padding_w - dilation_w * (window_w - 1) - 1
  1394. ) // stride_w + 1
  1395. assert inp.ndim == 6, "the input dimension of sliding_window_transpose should be 6"
  1396. assert (
  1397. inp.shape[2] == expected_h and inp.shape[3] == expected_w
  1398. ), "the input shape and output size do not match"
  1399. op = builtin.SlidingWindowTranspose(
  1400. out_h=output_h,
  1401. out_w=output_w,
  1402. pad_h=padding_h,
  1403. pad_w=padding_w,
  1404. stride_h=stride_h,
  1405. stride_w=stride_w,
  1406. dilate_h=dilation_h,
  1407. dilate_w=dilation_w,
  1408. window_h=window_h,
  1409. window_w=window_w,
  1410. )
  1411. (output,) = apply(op, inp)
  1412. return output
  1413. def pad(
  1414. src: Tensor,
  1415. pad_width: Tuple[Tuple[int, int], ...],
  1416. mode: str = "constant",
  1417. constant_value: float = 0.0,
  1418. ) -> Tensor:
  1419. """
  1420. Pad is a Python wrapper for the padding opr in MegBrain; it can pad any of up to 7 dimensions.
  1421. Supported modes are "constant", "edge" (replicate) and "reflect"; "constant" is the default mode.
  1422. """
  1423. p_offsets = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
  1424. assert mode.lower() in ["constant", "edge", "replicate", "reflect"]
  1425. if mode.lower() == "edge":
  1426. mode = "replicate"
  1427. for i in range(0, len(pad_width)):
  1428. p_offsets[i * 2] = pad_width[i][0]
  1429. p_offsets[i * 2 + 1] = pad_width[i][1]
  1430. op = builtin.Padding(
  1431. front_offset_dim0=p_offsets[0],
  1432. front_offset_dim1=p_offsets[2],
  1433. front_offset_dim2=p_offsets[4],
  1434. front_offset_dim3=p_offsets[6],
  1435. front_offset_dim4=p_offsets[8],
  1436. front_offset_dim5=p_offsets[10],
  1437. front_offset_dim6=p_offsets[12],
  1438. back_offset_dim0=p_offsets[1],
  1439. back_offset_dim1=p_offsets[3],
  1440. back_offset_dim2=p_offsets[5],
  1441. back_offset_dim3=p_offsets[7],
  1442. back_offset_dim4=p_offsets[9],
  1443. back_offset_dim5=p_offsets[11],
  1444. back_offset_dim6=p_offsets[13],
  1445. padding_val=constant_value,
  1446. padding_mode=mode.upper(),
  1447. )
  1448. (output,) = apply(op, src)
  1449. return output
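# --- Editor's illustrative sketch (not part of the original nn.py): constant padding of a
# 2-d tensor, adding one row of zeros before and after the first dimension. Placeholders only.
def _pad_usage_example():
    import numpy as np
    from megengine import tensor
    x = tensor(np.arange(6, dtype=np.float32).reshape(2, 3))
    y = pad(x, ((1, 1), (0, 0)), mode="constant", constant_value=0.0)
    # y has shape (4, 3); the new first and last rows are filled with 0.0
    return y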
  1450. from .quantized import conv_bias_activation # isort:skip
  1451. from .loss import * # isort:skip
  1452. from .metric import * # isort:skip
  1453. from .vision import * # isort:skip

The MegEngine installation package ships with the CUDA environment needed to run code on a GPU, so there is no separate CPU/GPU build to choose from. To run GPU programs, make sure the machine has a GPU device and its driver installed. If you would like to try deep-learning development on a cloud GPU platform, visit the MegStudio platform.