
nn.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=too-many-lines
from functools import lru_cache
from typing import NamedTuple, Optional, Sequence, Tuple, Union

from ..core._imperative_rt.core2 import apply, dtype_promotion
from ..core._imperative_rt.ops import SubgraphBuilder as _SubgraphBuilder
from ..core.ops import builtin
from ..core.ops.builtin import (
    BatchNorm,
    Elemwise,
    GetVarShape,
    Identity,
    Reduce,
    TypeCvt,
)
from ..core.ops.special import Const
from ..core.tensor import amp, megbrain_graph
from ..core.tensor.array_method import _elwise_apply
from ..core.tensor.utils import (
    astensor1d,
    astype,
    cast_tensors,
    convert_single_value,
    make_shape_tuple,
    setscalar,
    subgraph,
)
from ..device import get_default_device
from ..distributed import WORLD, is_distributed
from ..jit import exclude_from_trace
from ..random import uniform
from ..tensor import Tensor
from ..utils.deprecation import deprecated_func
from ..utils.tuple_function import _pair, _pair_nonzero, _triple, _triple_nonzero
from .debug_param import get_execution_strategy
from .distributed import all_reduce_sum
from .elemwise import _elwise, exp, log, log1p, maximum, minimum
from .math import matmul, max, sum
from .tensor import broadcast_to, concat, expand_dims, ones, squeeze, zeros

__all__ = [
    "adaptive_avg_pool2d",
    "adaptive_max_pool2d",
    "avg_pool2d",
    "batch_norm",
    "conv1d",
    "conv2d",
    "conv3d",
    "conv_transpose2d",
    "conv_transpose3d",
    "deformable_conv2d",
    "deformable_psroi_pooling",
    "dropout",
    "embedding",
    "gelu",
    "hsigmoid",
    "hswish",
    "indexing_one_hot",
    "leaky_relu",
    "linear",
    "local_conv2d",
    "logsigmoid",
    "logsumexp",
    "logsoftmax",
    "max_pool2d",
    "one_hot",
    "prelu",
    "relu",
    "relu6",
    "remap",
    "resize",
    "sigmoid",
    "sliding_window",
    "sliding_window_transpose",
    "silu",
    "softmax",
    "softplus",
    "sync_batch_norm",
    "warp_affine",
    "warp_perspective",
]


def expand_hw(x):
    # NOTE: >1d array is accepted, as long as 1 <= size <= 2
    try:
        x = int(x)
        return [x, x]
    except (TypeError, ValueError):
        pass
    h, w = x
    return int(h), int(w)


def linear(
    inp: Tensor, weight: Tensor, bias: Optional[Tensor] = None, compute_mode="default",
) -> Tensor:
    """
    Applies a linear transformation to the input tensor.

    Refer to :class:`~.module.linear.Linear` for more information.

    :param inp: input tensor with shape `(N, in_features)`.
    :param weight: weight with shape `(out_features, in_features)`.
    :param bias: bias with shape `(out_features,)`.
        Default: None
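
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.ones((4, 3), dtype=np.float32))  # assumed batch of 4, in_features=3
        w = tensor(np.ones((5, 3), dtype=np.float32))  # assumed out_features=5
        y = F.linear(x, w)
        print(y.numpy().shape)

    Outputs:

    .. testoutput::

        (4, 5)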
    """
    ret = matmul(inp, weight, transpose_b=True, compute_mode=compute_mode)
    if bias is not None:
        if amp._enabled:
            bias = bias.astype("float16")
        ret += bias
    return ret


def conv1d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """1D convolution operation.

    Refer to :class:`~.Conv1d` for more information.

    :param inp: the feature map of the convolution operation.
    :param weight: the convolution kernel.
    :param bias: the bias added to the result of convolution (if given).
    :param stride: stride of the 1D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 1D convolution operation. Default: 1
    :param groups: number of groups to divide input and output channels into,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, kernel_size)``. Default: 1
    :type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
    :param conv_mode: supports 'cross_correlation'. Default: 'cross_correlation'.
    :type compute_mode: string or
        :class:`mgb.opr_param_defs.Convolution.ComputeMode`
    :param compute_mode: when set to 'default', no special requirements will be
        placed on the precision of intermediate results. When set to 'float32',
        float32 would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
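
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.ones((2, 3, 10), dtype=np.float32))    # assumed (N, C, L)
        weight = tensor(np.ones((4, 3, 5), dtype=np.float32))  # assumed (out_c, in_c, kernel)
        out = F.conv1d(inp, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 4, 6)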
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    assert compute_mode.lower() == "default" or compute_mode.name == "DEFAULT"
    assert inp.ndim == 3, "the input dimension of conv1d should be 3"
    assert weight.ndim == 3, "the weight dimension of conv1d should be 3"
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        dtype = dtype_promotion(inp, weight)
        if inp.dtype != dtype:
            inp = inp.astype(dtype)
        if weight.dtype != dtype:
            weight = weight.astype(dtype)

    inp = expand_dims(inp, 3)
    weight = expand_dims(weight, 3)
    if bias is not None:
        assert bias.ndim == 3, "the bias dimension of conv1d should be 3"
        bias = expand_dims(bias, 3)

    stride_h = stride
    pad_h = padding
    dilate_h = dilation

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution(
        stride_h=stride_h,
        stride_w=1,
        pad_h=pad_h,
        pad_w=0,
        dilate_h=dilate_h,
        dilate_w=1,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    output = squeeze(output, 3)
    return output


def conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    2D convolution operation.

    Refer to :class:`~.module.Conv2d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default: "cross_correlation"
    :type compute_mode: string or :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
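
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.ones((2, 3, 8, 8), dtype=np.float32))     # assumed (N, C, H, W)
        weight = tensor(np.ones((4, 3, 3, 3), dtype=np.float32))  # assumed (out_c, in_c, kh, kw)
        out = F.conv2d(inp, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 4, 6, 6)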
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        dtype = dtype_promotion(inp, weight)
        if inp.dtype != dtype:
            inp = inp.astype(dtype)
        if weight.dtype != dtype:
            weight = weight.astype(dtype)

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output


def conv3d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int, int]] = 1,
    padding: Union[int, Tuple[int, int, int]] = 0,
    dilation: Union[int, Tuple[int, int, int]] = 1,
    groups: int = 1,
    conv_mode: str = "cross_correlation",
) -> Tensor:
    """
    3D convolution operation.

    Refer to :class:`~.Conv3d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, depth, height, width)``. Default: 1
    :param conv_mode: supports "cross_correlation". Default: "cross_correlation"
    :return: output tensor.
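
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.ones((2, 3, 6, 6, 6), dtype=np.float32))     # assumed (N, C, D, H, W)
        weight = tensor(np.ones((4, 3, 3, 3, 3), dtype=np.float32))  # assumed (out_c, in_c, kd, kh, kw)
        out = F.conv3d(inp, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 4, 4, 4, 4)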
    """
    assert conv_mode.lower() == "cross_correlation"

    D, H, W = 0, 1, 2

    pad = _triple(padding)
    stride = _triple_nonzero(stride)
    dilate = _triple_nonzero(dilation)

    dtype = dtype_promotion(inp, weight)
    if inp.dtype != dtype:
        inp = inp.astype(dtype)
    if weight.dtype != dtype:
        weight = weight.astype(dtype)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution3D(
        pad_d=pad[D],
        pad_h=pad[H],
        pad_w=pad[W],
        stride_d=stride[D],
        stride_h=stride[H],
        stride_w=stride[W],
        dilate_d=dilate[D],
        dilate_h=dilate[H],
        dilate_w=dilate[W],
        strategy=get_execution_strategy(),
        mode=conv_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output


def conv_transpose2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    2D transposed convolution operation.

    Refer to :class:`~.ConvTranspose2d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, in_channels // groups,
        out_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default: "cross_correlation"
    :type compute_mode: string or :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
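
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, with weight laid out as ``(in_channels, out_channels, kh, kw)``):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.ones((1, 3, 4, 4), dtype=np.float32))     # assumed (N, C, H, W)
        weight = tensor(np.ones((3, 5, 3, 3), dtype=np.float32))  # assumed (in_c, out_c, kh, kw)
        out = F.conv_transpose2d(inp, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (1, 5, 6, 6)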
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        dtype = dtype_promotion(inp, weight)
        if inp.dtype != dtype:
            inp = inp.astype(dtype)
        if weight.dtype != dtype:
            weight = weight.astype(dtype)

    if groups != 1:
        raise NotImplementedError("group transposed conv2d is not supported yet.")

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    op = builtin.ConvolutionBackwardData(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        compute_mode=compute_mode,
    )
    (output,) = apply(op, weight, inp)
    if bias is not None:
        output += bias
    return output


def deformable_conv2d(
    inp: Tensor,
    weight: Tensor,
    offset: Tensor,
    mask: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    Deformable convolution.

    :param inp: input feature map.
    :param weight: convolution kernel.
    :param offset: input offset to kernel, channel of this tensor should match the deformable settings.
    :param mask: input mask to kernel, channel of this tensor should match the deformable settings.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default: "cross_correlation"
    :type compute_mode: string or :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, offset, mask, bias = cast_tensors(inp, weight, offset, mask, bias)
    else:
        offset = offset.astype("float32")
        mask = mask.astype("float32")

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.DeformableConv(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight, offset, mask)
    if bias is not None:
        output += bias
    return output


def local_conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    conv_mode="cross_correlation",
):
    """Applies a spatial 2D convolution over a grouped, channeled image with untied kernels."""
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    dtype = dtype_promotion(inp, weight)
    if inp.dtype != dtype:
        inp = inp.astype(dtype)
    if weight.dtype != dtype:
        weight = weight.astype(dtype)

    op = builtin.GroupLocal(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        mode=conv_mode,
        sparse="dense",
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output


def conv_transpose3d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int, int]] = 1,
    padding: Union[int, Tuple[int, int, int]] = 0,
    dilation: Union[int, Tuple[int, int, int]] = 1,
) -> Tensor:
    """
    3D transposed convolution operation. Only supports the case where ``groups`` is 1
    and ``conv_mode`` is "cross_correlation".

    Refer to :class:`~.ConvTranspose3d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
        weight usually has shape ``(in_channels, out_channels, depth, height, width)``.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on all sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :return: output tensor.
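
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.ones((1, 3, 2, 2, 2), dtype=np.float32))     # assumed (N, C, D, H, W)
        weight = tensor(np.ones((3, 4, 2, 2, 2), dtype=np.float32))  # assumed (in_c, out_c, kd, kh, kw)
        out = F.conv_transpose3d(inp, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (1, 4, 3, 3, 3)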
    """
    D, H, W = 0, 1, 2
    pad = _triple(padding)
    stride = _triple_nonzero(stride)
    dilate = _triple_nonzero(dilation)

    dtype = dtype_promotion(inp, weight)
    if inp.dtype != dtype:
        inp = inp.astype(dtype)
    if weight.dtype != dtype:
        weight = weight.astype(dtype)

    op = builtin.Convolution3DBackwardData(
        pad_d=pad[D],
        pad_h=pad[H],
        pad_w=pad[W],
        stride_d=stride[D],
        stride_h=stride[H],
        stride_w=stride[W],
        dilate_d=dilate[D],
        dilate_h=dilate[H],
        dilate_w=dilate[W],
        strategy=get_execution_strategy(),
    )
    (output,) = apply(op, weight, inp)
    if bias is not None:
        output += bias
    return output


def max_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
    """
    Applies a 2D max pooling over an input tensor.

    Refer to :class:`~.MaxPool2d` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param stride: stride of the window. If not provided, its value is set to ``kernel_size``.
        Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    :return: output tensor.
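
    Example (a minimal usage sketch; the input values below are illustrative
    assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
        out = F.max_pool2d(inp, kernel_size=2)  # stride defaults to kernel_size
        print(out.numpy()[0, 0])

    Outputs:

    .. testoutput::

        [[ 5.  7.]
         [13. 15.]]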
    """
    if stride is None:
        stride = kernel_size
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride)
    padding_h, padding_w = _pair(padding)

    op = builtin.Pooling(
        window_h=window_h,
        window_w=window_w,
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=padding_h,
        pad_w=padding_w,
        mode="max",
    )
    (output,) = apply(op, inp)
    return output


def avg_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
    mode: str = "average_count_exclude_padding",
) -> Tensor:
    """
    Applies 2D average pooling over an input tensor.

    Refer to :class:`~.AvgPool2d` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param stride: stride of the window. If not provided, its value is set to ``kernel_size``.
        Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    :param mode: whether to include padding values in the average; setting it to
        "average" counts the padding. Default: "average_count_exclude_padding"
    :return: output tensor.
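
    Example (a minimal usage sketch; the input values below are illustrative
    assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
        out = F.avg_pool2d(inp, kernel_size=2)  # stride defaults to kernel_size
        print(out.numpy()[0, 0])

    Outputs:

    .. testoutput::

        [[ 2.5  4.5]
         [10.5 12.5]]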
    """
    if stride is None:
        stride = kernel_size
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride)
    padding_h, padding_w = _pair(padding)

    op = builtin.Pooling(
        window_h=window_h,
        window_w=window_w,
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=padding_h,
        pad_w=padding_w,
        mode=mode,
    )
    (output,) = apply(op, inp)
    return output


def adaptive_max_pool2d(
    inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
    """
    Applies a 2D max adaptive pooling over an input.

    Refer to :class:`~.MaxAdaptivePool2d` for more information.

    :param inp: input tensor.
    :param oshp: `(OH, OW)` size of the output shape.
    :return: output tensor.
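
    Example (a minimal usage sketch; the input values below are illustrative
    assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
        out = F.adaptive_max_pool2d(inp, 2)
        print(out.numpy()[0, 0])

    Outputs:

    .. testoutput::

        [[ 5.  7.]
         [13. 15.]]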
    """
    if isinstance(oshp, int):
        oshp = (oshp, oshp)

    op = builtin.AdaptivePooling(mode="max", format="NCHW",)
    oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
    (output,) = apply(op, inp, oshp)
    return output


def adaptive_avg_pool2d(
    inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
    """
    Applies a 2D average adaptive pooling over an input.

    Refer to :class:`~.AvgAdaptivePool2d` for more information.

    :param inp: input tensor.
    :param oshp: `(OH, OW)` size of the output shape.
    :return: output tensor.
    """
    if isinstance(oshp, int):
        oshp = (oshp, oshp)

    op = builtin.AdaptivePooling(mode="average", format="NCHW",)
    oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
    (output,) = apply(op, inp, oshp)
    return output


def deformable_psroi_pooling(
    inp: Tensor,
    rois: Tensor,
    trans: Tensor,
    no_trans: bool,
    part_size: int,
    pooled_h: int,
    pooled_w: int,
    sample_per_part: int,
    spatial_scale: float,
    trans_std: float = 0.1,
):
    """
    Deformable PSROI (Position Sensitive Region of Interest) pooling.

    :param inp: input feature map.
    :param rois: the rois for feature pooling.
    :param trans: input offset to psroi_pooling.
    :param no_trans: check the phase of DeformablePSROIPooling. False to the
        1st phase, True to the 2nd phase.
    :param part_size: part size.
    :param sample_per_part: sample points of each part.
    :param pooled_shape: kernel shape of convolution.
    :param spatial_scale: the spatial_scale w.r.t input image.
    :param trans_std: multiplier used in 2nd phase.
    """
    op = builtin.DeformablePSROIPooling(
        no_trans=no_trans,
        part_size=part_size,
        pooled_h=pooled_h,
        pooled_w=pooled_w,
        sample_per_part=sample_per_part,
        spatial_scale=spatial_scale,
        trans_std=trans_std,
    )
    output, _ = apply(op, inp, rois, trans)
    return output


def hswish(x):
    """
    Element-wise `x * relu6(x + 3) / 6`.

    :param x: input tensor.
    :return: computed tensor.

    Example:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(5).astype(np.float32))
        out = F.hswish(x)
        print(out.numpy().round(decimals=4))

    .. testoutput::

        [0.     0.6667 1.6667 3.     4.    ]
    """
    return _elwise(x, mode=Elemwise.Mode.H_SWISH)


def sigmoid(x):
    """Element-wise `1 / ( 1 + exp( -x ) )`."""
    return _elwise(x, mode=Elemwise.Mode.SIGMOID)


def hsigmoid(x):
    """Element-wise `relu6(x + 3) / 6`."""
    return relu6(x + 3) / 6


def relu(x):
    """Element-wise `max(x, 0)`."""
    return _elwise(x, mode=Elemwise.Mode.RELU)


def relu6(x):
    """Element-wise `min(max(x, 0), 6)`."""
    return minimum(maximum(x, 0), 6)


def prelu(inp: Tensor, weight: Tensor) -> Tensor:
    r"""
    Applies the element-wise PReLU function.

    Refer to :class:`~.PReLU` for more information.
    """
    return maximum(inp, 0) + weight * minimum(inp, 0)


def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
    r"""
    Applies the element-wise leaky_relu function.

    Refer to :class:`~.LeakyReLU` for more information.
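
    Example (a minimal usage sketch; the input values below are illustrative
    assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.array([-2.0, 0.0, 1.0], dtype=np.float32))
        print(F.leaky_relu(x, 0.1).numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [-0.2  0.   1. ]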
    """
    return maximum(inp, 0) + negative_slope * minimum(inp, 0)


def silu(x):
    r"""
    Applies the element-wise Sigmoid Linear Unit function, i.e. `x * sigmoid(x)`.
    """
    return _elwise(x, mode=Elemwise.Mode.SILU)


def gelu(x):
    r"""
    Applies the element-wise function:

    .. math::
        \text{gelu}(x) = x\Phi(x)

    where :math:`\Phi(x)` is the Cumulative Distribution Function for Gaussian Distribution.
    """
    return _elwise(x, mode=Elemwise.Mode.GELU)


def softplus(inp: Tensor) -> Tensor:
    r"""
    Applies the element-wise function:

    .. math::
        \text{softplus}(x) = \log(1 + \exp(x))

    softplus is a smooth approximation to the ReLU function and can be used
    to constrain the output to be always positive.
    For numerical stability the implementation follows this transformation:

    .. math::
        \text{softplus}(x) = \log(1 + \exp(x))
                           = \log(1 + \exp(-\text{abs}(x))) + \max(x, 0)
                           = \log1p(\exp(-\text{abs}(x))) + \text{relu}(x)

    :param inp: input tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-3, 3, dtype=np.float32))
        y = F.softplus(x)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]
    """
    return log1p(exp(-abs(inp))) + relu(inp)


def logsoftmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
    r"""
    Applies the :math:`\log(\text{softmax}(x))` function to an n-dimensional
    input tensor. The :math:`\text{logsoftmax}(x)` formulation can be simplified as:

    .. math::
        \text{logsoftmax}(x_{i}) = \log(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} )

    For numerical stability the implementation follows this transformation:

    .. math::
        \text{logsoftmax}(x)
        = \log (\frac{\exp (x)}{\sum_{i}(\exp (x_{i}))})
        = x - \log (\sum_{i}(\exp (x_{i})))
        = x - \text{logsumexp}(x)

    :param inp: input tensor.
    :param axis: axis along which :math:`\text{logsoftmax}(x)` will be applied.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsoftmax(x, axis=1)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
         [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]
    """
    return inp - logsumexp(inp, axis, keepdims=True)


def logsigmoid(inp: Tensor) -> Tensor:
    r"""
    Applies the element-wise function:

    .. math::
        \text{logsigmoid}(x) = \log(\frac{ 1 }{ 1 + \exp(-x)})
        = \log(1/(1 + \exp(-x)))
        = - \log(1 + \exp(-x))
        = - \text{softplus}(-x)

    :param inp: input tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32))
        y = F.logsigmoid(x)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [-5.0067 -4.0182 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
         -0.0181]
    """
    return -softplus(-inp)


def logsumexp(
    inp: Tensor, axis: Union[int, Sequence[int]], keepdims: bool = False
) -> Tensor:
    r"""
    Calculates the logarithm of the inputs' exponential sum along the given :attr:`axis`.

    .. math::
        \text{logsumexp}(x) = \log \sum_{j=1}^{n} \exp \left(x_{j}\right)

    For numerical stability, the implementation follows this transformation:

    .. math::
        \text{logsumexp}(x) = \log \sum_{j=1}^{n} \exp \left(x_{j}\right)
        = b + \log \sum_{j=1}^{n} \exp \left(x_{j}-b\right)

    where

    .. math::
        b = \max(x_j)

    :param inp: input tensor.
    :param axis: axis over which the sum is taken. It could be single axis or list of axes.
    :param keepdims: whether to retain :attr:`axis` or not for the output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsumexp(x, axis=1, keepdims=False)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [-0.5481  4.4519]
    """
    max_value = max(inp.detach(), axis, keepdims=True)
    if keepdims:
        return max_value + log(sum(exp(inp - max_value), axis, keepdims))
    else:
        return squeeze(max_value, axis=None) + log(
            sum(exp(inp - max_value), axis, keepdims)
        )


def _get_softmax_axis(ndim: int) -> int:
    if ndim in (0, 1, 3):
        return 0
    return 1


def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
    r"""
    Applies a :math:`\text{softmax}(x)` function. :math:`\text{softmax}(x)` is defined as:

    .. math::
        \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}

    It is applied to all elements along axis, and rescales elements so that
    they stay in the range `[0, 1]` and sum to 1.

    See :class:`~megengine.module.activation.Softmax` for more details.

    :param inp: input tensor.
    :param axis: an axis along which :math:`\text{softmax}(x)` will be applied. By default,
        :math:`\text{softmax}(x)` will apply along the highest ranked axis.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        out = F.softmax(x)
        print(out.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [[0.0117 0.0317 0.0861 0.2341 0.6364]
         [0.0117 0.0317 0.0861 0.2341 0.6364]]
    """
    if axis is None:
        axis = _get_softmax_axis(len(inp.shape))
    offset = inp.max(axis=axis, keepdims=True).detach()
    cached = exp(inp - offset)
    down = sum(cached, axis=axis, keepdims=True)
    return cached / down


def batch_norm(
    inp: Tensor,
    running_mean: Tensor = None,
    running_var: Tensor = None,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    *,
    training: bool = False,
    momentum: float = 0.9,
    eps: float = 1e-5,
    inplace: bool = True,
    compute_mode="default"
):
    r"""
    Applies batch normalization to the input.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`.
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`.
    :param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: False
    :param momentum: value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5
    :param inplace: whether to update ``running_mean`` and ``running_var``
        in place or to return new tensors. Default: True
    :return: output tensor.
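
    Example (a minimal inference-mode sketch; the shapes and statistics below
    are illustrative assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))  # assumed (N, C, H, W)
        mean = tensor(np.zeros((1, 3, 1, 1), dtype=np.float32))       # running mean, 4D as required
        var = tensor(np.ones((1, 3, 1, 1), dtype=np.float32))         # running variance, 4D as required
        out = F.batch_norm(inp, mean, var, training=False)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 3, 4, 4)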
    """
    if inp.ndim != 4:
        raise NotImplementedError("batch_norm for ndim != 4")

    C = inp.shape[1]

    def make_full_if_none(x, value):
        if x is None:
            (x,) = Const(value, dtype=inp.dtype, device=inp.device)()
            shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
            (result,) = apply(builtin.Broadcast(), x, shape)
            return result
        elif x.ndim == 1:
            shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
            (result,) = apply(builtin.Reshape(), x, shape)
            return result
        return x

    has_mean = running_mean is not None
    has_var = running_var is not None

    if not training:
        assert has_mean, "running_mean must be provided in inference mode"
        assert has_var, "running_var must be provided in inference mode"

    if has_mean and running_mean.ndim != 4:
        raise ValueError
    if has_var and running_var.ndim != 4:
        raise ValueError

    if amp._enabled:
        inp = inp.astype("float16")
        weight, bias, running_mean, running_var = cast_tensors(
            weight, bias, running_mean, running_var, promote=True
        )
    weight = make_full_if_none(weight, 1)
    bias = make_full_if_none(bias, 0)

    if not training:
        op = builtin.BatchNorm(
            fwd_mode=BatchNorm.FwdMode.INFERENCE, epsilon=eps, param_dim="dim_1c11"
        )
        ret = apply(op, inp, weight, bias, running_mean, running_var)[-1]
        return ret
    else:
        op = builtin.BatchNorm(
            avg_factor=1 - momentum, epsilon=eps, param_dim="dim_1c11"
        )
        if has_mean or has_var:
            running_mean = make_full_if_none(running_mean, 0)
            running_var = make_full_if_none(running_var, 1)
            new_mean, new_var, _, _, inp = apply(
                op, inp, weight, bias, running_mean, running_var
            )
            if not has_mean:
                new_mean = None
            if not has_var:
                new_var = None

            if inplace:
                if has_mean:
                    running_mean[...] = new_mean
                if has_var:
                    running_var[...] = new_var
                return inp
            else:
                return inp, new_mean, new_var
        else:
            (_, _, inp,) = apply(op, inp, weight, bias)
            return inp


@lru_cache(maxsize=None)
def _get_sync_bn_ops(device, dtype, eps_mode, ndim, channels):
    # fmt: off
    @subgraph("SyncBnStage0", dtype, device, 1)
    def syncbn_stage0(inputs, f, c):
        input = inputs[0]
        reduce_shape = c((1, channels) + (1,) * (ndim - 2), dtype="int32", device=device)
        input_shape = f(GetVarShape(), input)
        input_elems = f(Reduce(mode="product", axis=0), input_shape)
        reduce_elems = f(Reduce(mode="product", axis=0), reduce_shape)
        reduce_size = f("//", input_elems, reduce_elems)
        channel_x1s = f(Reduce(mode="sum"), input, reduce_shape)
        channel_x2s = f(Reduce(mode="sum_sqr"), input, reduce_shape)
        reduce_size_f = f(TypeCvt(dtype=dtype), reduce_size)
        return (reduce_shape, reduce_size_f, channel_x1s, channel_x2s), (False, False, True, True)

    @subgraph("SyncBnStage1", dtype, device, 7)
    def syncbn_stage1(inputs, f, c):
        input, reduce_size, channel_x1s, channel_x2s, eps = inputs[0:5]
        weight, bias = inputs[5:7]
        channel_mean = f("/", channel_x1s, reduce_size)
        channel_var =\
            f("+", f("/", f("**", channel_x1s, c(2)),
                     f("-", f("*", reduce_size, reduce_size))),
                   f("/", channel_x2s, reduce_size))
        invsqrt_channel_var = f("**", f(eps_mode, channel_var, eps), c(-0.5))
        inv_var_wt = f("*", invsqrt_channel_var, weight)
        neg_channel_mean = f("-", channel_mean)
        outvar =\
            f("fma3", input, inv_var_wt,
              f("+", f("*", neg_channel_mean, inv_var_wt),
                     bias))
        return (outvar, channel_mean, channel_var, inv_var_wt), (True, False, False, False)

    @subgraph("SyncBnStage1Inference", dtype, device, 6)
    def syncbn_stage1_inference(inputs, f, c):
        input, channel_mean, channel_var, eps = inputs[0:4]
        weight, bias = inputs[4:6]
        invsqrt_channel_var = f("**", f(eps_mode, channel_var, eps), c(-0.5))
        inv_var_wt = f("*", invsqrt_channel_var, weight)
        neg_channel_mean = f("-", channel_mean)
        outvar =\
            f("+", f("*", input, inv_var_wt),
              f("+", f("*", neg_channel_mean, inv_var_wt),
                     bias))
        return (outvar,), (True,)

    @subgraph("SyncBnStage2", dtype, device, 7)
    def syncbn_stage2(inputs, f, c):
        running_mean, running_var, momentum = inputs[0:3]
        reduce_size, channel_x1s, channel_x2s, channel_mean = inputs[3:7]
        c1_minus_momentum = f("-", c(1), momentum)
        reduce_size_minus_c1 = f("-", reduce_size, c(1))
        running_mean = f("fma4",
            running_mean, momentum,
            c1_minus_momentum, channel_mean,
        )
        channel_variance_unbiased =\
            f("+", f("/", f("**", channel_x1s, c(2)),
                     f("*", f("-", reduce_size),
                            reduce_size_minus_c1)),
                   f("/", channel_x2s,
                          reduce_size_minus_c1))
        running_var = f("fma4",
            running_var, momentum,
            c1_minus_momentum, channel_variance_unbiased
        )
        return (running_mean, running_var), (True, True)

    @subgraph("SyncBnConcatStats", dtype, device, 3)
    def syncbn_concat_stats(inputs, f, c):
        reduce_size, channel_x1s, channel_x2s = inputs[0:3]
        reduce_size = f(builtin.Broadcast(), reduce_size, c([1]*ndim, dtype="int32"))
        stats = f(builtin.Concat(axis=1, comp_node=device), reduce_size, channel_x1s, channel_x2s)
        return (stats,), (True,)

    @subgraph("SyncBnSplitStats", dtype, device, 1)
    def syncbn_split_stats(inputs, f, c):
        stats = inputs[0]
        c_1 = c(1, dtype="int32")
        channel_x1s_end = c(channels+1, dtype="int32")
        def _subtensor(src, axis, begin, end):
            items = (axis, (begin is not None), (end is not None), False, False),
            args = ()
            if begin is not None:
                args += begin,
            if end is not None:
                args += end,
            return f(builtin.Subtensor(items=items), src, *args)
        reduce_size = _subtensor(stats, 1, None, c_1)
        channel_x1s = _subtensor(stats, 1, c_1, channel_x1s_end)
        channel_x2s = _subtensor(stats, 1, channel_x1s_end, None)
        reduce_size = f(builtin.Reshape(), reduce_size, c_1)
        return (reduce_size, channel_x1s, channel_x2s), (False, True, True)
    # fmt: on
    return (
        syncbn_stage0,
        syncbn_stage1,
        syncbn_stage1_inference,
        syncbn_stage2,
        syncbn_concat_stats,
        syncbn_split_stats,
    )


def sync_batch_norm(
    inp: Tensor,
    running_mean: Tensor,
    running_var: Tensor,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    training: bool = False,
    momentum: Union[float, Tensor] = 0.9,
    eps: float = 1e-5,
    eps_mode="additive",
    group=WORLD,
) -> Tensor:
    r"""
    Applies synchronized batch normalization to the input.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`.
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`.
    :param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: False
    :param momentum: value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5
    :param eps_mode: mode of calculation for eps, "max" or "additive".
        Default: "additive"
    :param group: communication group; mean and variance are calculated within this group.
        Default: :obj:`~megengine.distributed.WORLD`
    :return: output tensor.
    """
    assert eps_mode.lower() in {"max", "additive"}, "unknown eps_mode: {}".format(
        eps_mode
    )
    # TODO: cudnnBn fastpath
    _channels = make_shape_tuple(inp.shape)[1]
    _ndim = inp.ndim
    _device = inp.device
    _dtype = inp.dtype

    def _make_full_if_none(x, value):
        if x is None:
            (x,) = Const(value, dtype=inp.dtype, device=_device)()
            (result,) = apply(builtin.Broadcast(), x, reduce_shape)
            return result
        elif x.ndim == 1:
            (result,) = apply(builtin.Reshape(), x, reduce_shape)
            return result
        return x

    (
        syncbn_stage0,
        syncbn_stage1,
        syncbn_stage1_inference,
        syncbn_stage2,
        syncbn_concat_stats,
        syncbn_split_stats,
    ) = _get_sync_bn_ops(_device, _dtype, eps_mode, _ndim, _channels)

    reduce_shape, reduce_size, channel_x1s, channel_x2s = apply(syncbn_stage0(), inp)

    eps = convert_single_value(eps, dtype=inp.dtype, device=inp.device)

    weight = _make_full_if_none(weight, 1)
    bias = _make_full_if_none(bias, 0)

    if training:
        if is_distributed():
            # reduce all nodes' data to calculate mean and variance
            (stat,) = apply(
                syncbn_concat_stats(), reduce_size, channel_x1s, channel_x2s
            )
            stat = all_reduce_sum(stat, group)
            reduce_size, channel_x1s, channel_x2s = apply(syncbn_split_stats(), stat)

        outvar, channel_mean, *_ = apply(
            syncbn_stage1(),
            inp,
            reduce_size,
            channel_x1s,
            channel_x2s,
            eps,
            weight,
            bias,
        )
    else:
        assert running_var is not None and running_mean is not None
        channel_mean = running_mean
        channel_var = running_var
        outvar, *_ = apply(
            syncbn_stage1_inference(), inp, channel_mean, channel_var, eps, weight, bias
        )

    # outvar = output * weight + bias
    # where output = inp * invsqrt_channel_variance + (
    #     -channel_mean * invsqrt_channel_variance
    # )
    # Manually expand output for gopt

    if training and running_var is not None and running_mean is not None:
        momentum = convert_single_value(momentum, dtype=inp.dtype, device=inp.device)
        running_mean[...], running_var[...] = apply(
            syncbn_stage2(),
            running_mean,
            running_var,
            momentum,
            reduce_size,
            channel_x1s,
            channel_x2s,
            channel_mean,
        )

    return outvar


def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
    """
    Returns a new tensor where each of the elements is randomly set to zero
    with probability P = ``drop_prob``. Optionally rescales the output tensor if ``training`` is True.

    :param inp: input tensor.
    :param drop_prob: probability to drop (set to zero) a single element.
    :param training: the default behavior of ``dropout`` during training is to rescale the output,
        then it can be replaced by an :class:`~.Identity` during inference. Default: True
    :return: the output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        # test training mode
        data = tensor(np.ones(10000000, dtype=np.float32))
        out = F.nn.dropout(data, 1.0 / 3.0, training=True)
        assert not out.numpy().all()

        # test eval mode
        out = F.nn.dropout(data, 1.0 / 3.0, training=False)
        assert out.numpy().all()

    Outputs:

    .. testoutput::
        :options: +SKIP

        [1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
    """
    assert 0 <= drop_prob < 1
    if not training or drop_prob == 0:
        return inp

    # model in training mode, e.g. model.train()
    rv = uniform(size=inp.shape)
    mask = rv > drop_prob
    ret = inp * mask.astype(inp.dtype)
    ret *= 1 / (1 - drop_prob)
    return ret


def one_hot(inp: Tensor, num_classes: int) -> Tensor:
    r"""
    Performs one-hot encoding for the input tensor.

    :param inp: input tensor.
    :param num_classes: number of classes denotes the last dimension of the output tensor.
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(x, num_classes=4)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[0 1 0 0]
         [0 0 1 0]
         [0 0 0 1]]
    """
    zeros_tensor = zeros(list(inp.shape) + [num_classes], inp.dtype, inp.device)
    ones_tensor = ones(list(inp.shape) + [1], inp.dtype, inp.device)

    op = builtin.IndexingSetOneHot(axis=inp.ndim)
    (result,) = apply(op, zeros_tensor, inp, ones_tensor)
    return result


def embedding(
    inp: Tensor,
    weight: Tensor,
    padding_idx: Optional[int] = None,
    max_norm: Optional[float] = None,
    norm_type: Optional[float] = None,
):
    """
    Applies a lookup table for embedding.

    :param inp: tensor with indices.
    :param weight: learnable weights to embed from.
    :param padding_idx: should be set to None, not supported now.
    :param max_norm: should be set to None, not supported now.
    :param norm_type: should be set to None, not supported now.
    :return: output tensor.

    Refer to :class:`~.Embedding` for more information.
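
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions, not requirements of the API):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        indices = tensor(np.array([[0, 2], [1, 3]], dtype=np.int32))
        weight = tensor(np.ones((10, 4), dtype=np.float32))  # assumed 10 entries, dim 4
        out = F.embedding(indices, weight)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (2, 2, 4)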
    """
    if padding_idx is not None:
        raise ValueError("padding_idx is not supported now!")
    if max_norm is not None or norm_type is not None:
        raise ValueError("weight normalization is not supported now!")

    dest_shp = list(inp.shape) + [weight.shape[-1]]
    return weight[inp.reshape(-1)].reshape(dest_shp)


def indexing_one_hot(
    src: Tensor, index: Tensor, axis: int = 1, keepdims=False
) -> Tensor:
    r"""
    One-hot indexing for some axes.

    :param src: input tensor.
    :param index: index tensor.
    :param axis: axis of ``src`` to be indexed by the values in ``index``. Default: 1
    :param keepdims: whether not to remove the axis in result. Default: False
    :return: output tensor.

    Examples:

    .. testcode::

        import megengine.functional as F
        from megengine import tensor

        src = tensor([[1.0, 2.0]])
        index = tensor([0])
        val = F.indexing_one_hot(src, index)
        print(val.numpy())

    Outputs:

    .. testoutput::

        [1.]
    """
    assert isinstance(src, Tensor), "src must be of Tensor type"
    op = builtin.IndexingOneHot(axis=axis)
    index = convert_single_value(index, dtype="int32", device=src.device)
    (result,) = apply(op, src, index)
    if not keepdims:
        result = squeeze(result, axis)
    return result


def sliding_window(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    padding: Union[int, Tuple[int, int]] = 0,
    stride: Union[int, Tuple[int, int]] = 1,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> Tensor:
    """
    Extracts sliding local blocks from a batched input tensor.

    Refer to :class:`~.SlidingWindow` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param padding: implicit zero padding added on both sides of input. Default: 0
    :param stride: stride of the window. Default: 1
    :param dilation: dilation of the window. Default: 1
    :return: output tensor.
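
    Example (a minimal usage sketch; the shapes below are illustrative
    assumptions; the output is laid out as ``(N, C, OH, OW, window_h, window_w)``):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        inp = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
        out = F.sliding_window(inp, kernel_size=2, stride=2)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (1, 1, 2, 2, 2, 2)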
    """
    padding_h, padding_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilation_h, dilation_w = _pair_nonzero(dilation)
    window_h, window_w = _pair_nonzero(kernel_size)

    op = builtin.Images2Neibs(
        pad_h=padding_h,
        pad_w=padding_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilation_h,
        dilate_w=dilation_w,
        window_h=window_h,
        window_w=window_w,
    )
    (output,) = apply(op, inp)
    return output


def sliding_window_transpose(
    inp: Tensor,
    output_size: Union[int, Tuple[int, int]],
    kernel_size: Union[int, Tuple[int, int]],
    padding: Union[int, Tuple[int, int]] = 0,
    stride: Union[int, Tuple[int, int]] = 1,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> Tensor:
    """
    Sums over the sliding windows at the corresponding input locations.

    Refer to :class:`~.SlidingWindowTranspose` for more information.

    :param inp: input tensor.
    :param output_size: shape of output tensor.
    :param kernel_size: size of the window.
    :param padding: implicit zero padding added on both sides of input. Default: 0
    :param stride: stride of the window. Default: 1
    :param dilation: dilation of the window. Default: 1
    :return: output tensor.
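
    Example (a minimal usage sketch inverting the ``sliding_window`` example
    above; shapes are illustrative assumptions):

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        # input shape (N, C, OH, OW, window_h, window_w) must match the given
        # output_size, kernel_size and stride
        inp = tensor(np.ones((1, 1, 2, 2, 2, 2), dtype=np.float32))
        out = F.sliding_window_transpose(inp, output_size=4, kernel_size=2, stride=2)
        print(out.numpy().shape)

    Outputs:

    .. testoutput::

        (1, 1, 4, 4)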
    """
    output_h, output_w = _pair_nonzero(output_size)
    padding_h, padding_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilation_h, dilation_w = _pair_nonzero(dilation)
    window_h, window_w = _pair_nonzero(kernel_size)

    expected_h = (
        output_h + 2 * padding_h - dilation_h * (window_h - 1) - 1
    ) // stride_h + 1
    expected_w = (
        output_w + 2 * padding_w - dilation_w * (window_w - 1) - 1
    ) // stride_w + 1
    assert inp.ndim == 6, "the input dimension of sliding_window_transpose should be 6"
    assert (
        inp.shape[2] == expected_h and inp.shape[3] == expected_w
    ), "the input shape and output size do not match"

    op = builtin.SlidingWindowTranspose(
        out_h=output_h,
        out_w=output_w,
        pad_h=padding_h,
        pad_w=padding_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilation_h,
        dilate_w=dilation_w,
        window_h=window_h,
        window_w=window_w,
    )
    (output,) = apply(op, inp)
    return output


interpolate = deprecated_func("1.3", "megengine.functional.vision", "interpolate", True)
roi_pooling = deprecated_func("1.3", "megengine.functional.vision", "roi_pooling", True)
roi_align = deprecated_func("1.3", "megengine.functional.vision", "roi_align", True)
nms = deprecated_func("1.3", "megengine.functional.vision", "nms", True)
resize = deprecated_func("1.3", "megengine.functional.vision", "resize", True)
remap = deprecated_func("1.3", "megengine.functional.vision", "remap", True)
nvof = deprecated_func("1.3", "megengine.functional.vision", "nvof", True)
warp_affine = deprecated_func("1.3", "megengine.functional.vision", "warp_affine", True)
warp_perspective = deprecated_func(
    "1.3", "megengine.functional.vision", "warp_perspective", True
)

from .quantized import conv_bias_activation  # isort:skip
from .loss import *  # isort:skip

The MegEngine package bundles the CUDA environment needed to run code on GPUs, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has GPU hardware and the driver installed. If you would like to try deep-learning development on a cloud GPU compute platform, you are welcome to visit the MegStudio platform.