
nn.py 47 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# pylint: disable=too-many-lines
from typing import Optional, Sequence, Tuple, Union

from ..core._imperative_rt.core2 import apply
from ..core._imperative_rt.graph import VarNode
from ..core._trace_option import use_symbolic_shape
from ..core.ops import builtin
from ..core.ops.builtin import BatchNorm, Elemwise
from ..core.ops.special import Const
from ..core.tensor import amp, megbrain_graph
from ..core.tensor.array_method import _elwise_apply
from ..core.tensor.utils import (
    astensor1d,
    astype,
    cast_tensors,
    convert_inputs,
    convert_single_value,
    setscalar,
)
from ..device import get_default_device
from ..distributed import WORLD, is_distributed
from ..random import uniform
from ..tensor import Tensor
from ..utils.deprecation import deprecated_func
from ..utils.tuple_function import _pair, _pair_nonzero, _triple, _triple_nonzero
from .debug_param import get_execution_strategy
from .distributed import all_reduce_sum
from .elemwise import _elwise, exp, floor, log, log1p, maximum, minimum
from .math import argsort, matmul, max, prod, sum
from .tensor import (
    broadcast_to,
    concat,
    expand_dims,
    full,
    ones,
    reshape,
    squeeze,
    zeros,
)

__all__ = [
    "adaptive_avg_pool2d",
    "adaptive_max_pool2d",
    "avg_pool2d",
    "batch_norm",
    "conv1d",
    "conv2d",
    "conv3d",
    "conv_transpose2d",
    "conv_transpose3d",
    "deformable_conv2d",
    "deformable_psroi_pooling",
    "dropout",
    "embedding",
    "hsigmoid",
    "hswish",
    "indexing_one_hot",
    "leaky_relu",
    "linear",
    "local_conv2d",
    "logsigmoid",
    "logsumexp",
    "logsoftmax",
    "max_pool2d",
    "one_hot",
    "prelu",
    "relu",
    "relu6",
    "remap",
    "resize",
    "sigmoid",
    "sliding_window",
    "sliding_window_transpose",
    "softmax",
    "softplus",
    "sync_batch_norm",
    "warp_affine",
    "warp_perspective",
]

def expand_hw(x):
    # NOTE: >1d array is accepted, as long as 1 <= size <= 2
    try:
        x = int(x)
        return [x, x]
    except (TypeError, ValueError):
        pass
    h, w = x
    return int(h), int(w)
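
# Illustrative sketch (not part of the original module): ``expand_hw``
# normalizes a scalar or an (h, w) pair into per-dimension values, e.g.
#
#     expand_hw(3)       # -> [3, 3]
#     expand_hw((2, 4))  # -> (2, 4)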

def linear(
    inp: Tensor, weight: Tensor, bias: Optional[Tensor] = None, compute_mode="default",
) -> Tensor:
    """
    Applies a linear transformation to the input tensor.

    Refer to :class:`~.module.linear.Linear` for more information.

    :param inp: input tensor with shape `(N, in_features)`.
    :param weight: weight with shape `(out_features, in_features)`.
    :param bias: bias with shape `(out_features,)`. Default: None
    """
    ret = matmul(inp, weight, transpose_b=True, compute_mode=compute_mode)
    if bias is not None:
        if amp._enabled:
            bias = bias.astype("float16")
        ret += bias
    return ret
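
# Usage sketch (illustrative, not from the original module): with ``x`` of
# shape (2, 3) and ``w`` of shape (4, 3), ``linear`` computes ``x @ w.T``:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.ones((2, 3), dtype=np.float32))
#     w = tensor(np.ones((4, 3), dtype=np.float32))
#     y = F.linear(x, w)
#     print(y.shape)   # (2, 4); with all-ones inputs every entry is 3.0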

def conv1d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: int = 1,
    padding: int = 0,
    dilation: int = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """1D convolution operation.

    Refer to :class:`~.Conv1d` for more information.

    :param inp: the feature map of the convolution operation.
    :param weight: the convolution kernel.
    :param bias: the bias added to the result of convolution (if given).
    :param stride: stride of the 1D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 1D convolution operation. Default: 1
    :param groups: number of groups to divide input and output channels into,
        so as to perform a "grouped convolution". When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, kernel_size)``. Default: 1
    :type conv_mode: string or :class:`mgb.opr_param_defs.Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default:
        "cross_correlation"
    :type compute_mode: string or
        :class:`mgb.opr_param_defs.Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        float32 would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    assert compute_mode.lower() == "default" or compute_mode.name == "DEFAULT"
    assert inp.ndim == 3, "the input dimension of conv1d should be 3"
    assert weight.ndim == 3, "the weight dimension of conv1d should be 3"
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        inp, weight = convert_inputs(inp, weight)

    inp = expand_dims(inp, 3)
    weight = expand_dims(weight, 3)
    if bias is not None:
        assert bias.ndim == 3, "the bias dimension of conv1d should be 3"
        bias = expand_dims(bias, 3)

    stride_h = stride
    pad_h = padding
    dilate_h = dilation

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution(
        stride_h=stride_h,
        stride_w=1,
        pad_h=pad_h,
        pad_w=0,
        dilate_h=dilate_h,
        dilate_w=1,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    output = squeeze(output, 3)
    return output
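
# Usage sketch (illustrative, not from the original module): for an input of
# shape (N, C, L) = (2, 3, 16) and a kernel of shape (out, in, k) = (4, 3, 5),
# padding=2 keeps the length: L_out = (16 + 2*2 - (5 - 1) - 1) // 1 + 1 = 16.
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.random.randn(2, 3, 16).astype(np.float32))
#     w = tensor(np.random.randn(4, 3, 5).astype(np.float32))
#     y = F.conv1d(x, w, padding=2)
#     print(y.shape)   # (2, 4, 16)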

def conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    2D convolution operation.

    Refer to :class:`~.module.Conv2d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default:
        "cross_correlation"
    :type compute_mode: string or
        :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        inp, weight = convert_inputs(inp, weight)

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output
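
# Usage sketch (illustrative, not from the original module): a dense 3x3
# convolution with padding=1 preserves spatial size, so an NCHW input of
# shape (1, 3, 8, 8) and a kernel of shape (4, 3, 3, 3) yield (1, 4, 8, 8).
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.random.randn(1, 3, 8, 8).astype(np.float32))
#     w = tensor(np.random.randn(4, 3, 3, 3).astype(np.float32))
#     y = F.conv2d(x, w, stride=1, padding=1)
#     print(y.shape)   # (1, 4, 8, 8)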

def conv3d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int, int]] = 1,
    padding: Union[int, Tuple[int, int, int]] = 0,
    dilation: Union[int, Tuple[int, int, int]] = 1,
    groups: int = 1,
    conv_mode: str = "cross_correlation",
) -> Tensor:
    """
    3D convolution operation.

    Refer to :class:`~.Conv3d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by ``groups``,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, depth, height, width)``. Default: 1
    :param conv_mode: supports "cross_correlation". Default:
        "cross_correlation"
    :return: output tensor.
    """
    assert conv_mode.lower() == "cross_correlation"

    inp, weight = convert_inputs(inp, weight)

    D, H, W = 0, 1, 2

    pad = _triple(padding)
    stride = _triple_nonzero(stride)
    dilate = _triple_nonzero(dilation)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.Convolution3D(
        pad_d=pad[D],
        pad_h=pad[H],
        pad_w=pad[W],
        stride_d=stride[D],
        stride_h=stride[H],
        stride_w=stride[W],
        dilate_d=dilate[D],
        dilate_h=dilate[H],
        dilate_w=dilate[W],
        strategy=get_execution_strategy(),
        mode=conv_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output

def conv_transpose2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    2D transposed convolution operation.

    Refer to :class:`~.ConvTranspose2d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by groups,
        and the shape of weight should be ``(groups, in_channels // groups,
        out_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default:
        "cross_correlation"
    :type compute_mode: string or
        :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, bias = cast_tensors(inp, weight, bias)
    else:
        inp, weight = convert_inputs(inp, weight)

    if groups != 1:
        raise NotImplementedError("group transposed conv2d is not supported yet.")

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    op = builtin.ConvolutionBackwardData(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        compute_mode=compute_mode,
    )
    (output,) = apply(op, weight, inp)
    if bias is not None:
        output += bias
    return output
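
# Usage sketch (illustrative, not from the original module; the dense weight
# layout (in_channels, out_channels, kh, kw) is assumed here, mirroring the
# one documented for conv_transpose3d below). The output size follows
# OH = (IH - 1) * stride - 2 * padding + dilation * (KH - 1) + 1:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.random.randn(1, 3, 4, 4).astype(np.float32))
#     w = tensor(np.random.randn(3, 4, 3, 3).astype(np.float32))
#     y = F.conv_transpose2d(x, w, stride=2, padding=1)
#     print(y.shape)   # (1, 4, 7, 7), since (4 - 1) * 2 - 2 + 2 + 1 = 7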

def deformable_conv2d(
    inp: Tensor,
    weight: Tensor,
    offset: Tensor,
    mask: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    groups: int = 1,
    conv_mode="cross_correlation",
    compute_mode="default",
) -> Tensor:
    """
    Deformable Convolution.

    :param inp: input feature map.
    :param weight: convolution kernel.
    :param offset: input offset to kernel, channel of this tensor should match the deformable settings.
    :param mask: input mask to kernel, channel of this tensor should match the deformable settings.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 2D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on both sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 2D convolution operation. Default: 1
    :param groups: number of groups into which the input and output channels are divided,
        so as to perform a ``grouped convolution``. When ``groups`` is not 1,
        ``in_channels`` and ``out_channels`` must be divisible by groups,
        and the shape of weight should be ``(groups, out_channel // groups,
        in_channels // groups, height, width)``. Default: 1
    :type conv_mode: string or :class:`Convolution.Mode`
    :param conv_mode: supports "cross_correlation". Default:
        "cross_correlation"
    :type compute_mode: string or
        :class:`Convolution.ComputeMode`
    :param compute_mode: when set to "default", no special requirements will be
        placed on the precision of intermediate results. When set to "float32",
        "float32" would be used for accumulator and intermediate result, but only
        effective when input and output are of float16 dtype.
    :return: output tensor.
    """
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )
    if amp._enabled:
        compute_mode = "float32"
        inp, weight, offset, mask, bias = cast_tensors(inp, weight, offset, mask, bias)
    else:
        inp, weight, offset, mask = convert_inputs(inp, weight, offset, mask)

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    sparse_type = "dense" if groups == 1 else "group"
    op = builtin.DeformableConv(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        strategy=get_execution_strategy(),
        mode=conv_mode,
        compute_mode=compute_mode,
        sparse=sparse_type,
    )
    (output,) = apply(op, inp, weight, offset, mask)
    if bias is not None:
        output += bias
    return output

def local_conv2d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int]] = 1,
    padding: Union[int, Tuple[int, int]] = 0,
    dilation: Union[int, Tuple[int, int]] = 1,
    conv_mode="cross_correlation",
):
    """Applies a spatial 2D convolution over a grouped, channeled image with untied kernels."""
    assert (
        conv_mode.lower() == "cross_correlation"
        or conv_mode.name == "CROSS_CORRELATION"
    )

    inp, weight = convert_inputs(inp, weight)

    stride_h, stride_w = expand_hw(stride)
    pad_h, pad_w = expand_hw(padding)
    dilate_h, dilate_w = expand_hw(dilation)

    op = builtin.GroupLocal(
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=pad_h,
        pad_w=pad_w,
        dilate_h=dilate_h,
        dilate_w=dilate_w,
        mode=conv_mode,
        sparse="dense",
    )
    (output,) = apply(op, inp, weight)
    if bias is not None:
        output += bias
    return output

def conv_transpose3d(
    inp: Tensor,
    weight: Tensor,
    bias: Optional[Tensor] = None,
    stride: Union[int, Tuple[int, int, int]] = 1,
    padding: Union[int, Tuple[int, int, int]] = 0,
    dilation: Union[int, Tuple[int, int, int]] = 1,
) -> Tensor:
    """
    3D transposed convolution operation. Only supports the case where groups = 1
    and conv_mode = "cross_correlation".

    Refer to :class:`~.ConvTranspose3d` for more information.

    :param inp: feature map of the convolution operation.
    :param weight: convolution kernel.
        weight usually has shape ``(in_channels, out_channels, depth, height, width)``.
    :param bias: bias added to the result of convolution (if given).
    :param stride: stride of the 3D convolution operation. Default: 1
    :param padding: size of the paddings added to the input on all sides of its
        spatial dimensions. Only zero-padding is supported. Default: 0
    :param dilation: dilation of the 3D convolution operation. Default: 1
    :return: output tensor.
    """
    inp, weight = convert_inputs(inp, weight)

    D, H, W = 0, 1, 2

    pad = _triple(padding)
    stride = _triple_nonzero(stride)
    dilate = _triple_nonzero(dilation)

    op = builtin.Convolution3DBackwardData(
        pad_d=pad[D],
        pad_h=pad[H],
        pad_w=pad[W],
        stride_d=stride[D],
        stride_h=stride[H],
        stride_w=stride[W],
        dilate_d=dilate[D],
        dilate_h=dilate[H],
        dilate_w=dilate[W],
        strategy=get_execution_strategy(),
    )
    (output,) = apply(op, weight, inp)
    if bias is not None:
        output += bias
    return output

def max_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
) -> Tensor:
    """
    Applies a 2D max pooling over an input tensor.

    Refer to :class:`~.MaxPool2d` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param stride: stride of the window. If not provided, its value is set to ``kernel_size``.
        Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    :return: output tensor.
    """
    if stride is None:
        stride = kernel_size
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride)
    padding_h, padding_w = _pair(padding)

    op = builtin.Pooling(
        window_h=window_h,
        window_w=window_w,
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=padding_h,
        pad_w=padding_w,
        mode="max",
    )
    (output,) = apply(op, inp)
    return output
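
# Usage sketch (illustrative, not from the original module): with
# kernel_size=2 the stride defaults to 2, so each 2x2 block is reduced to
# its maximum:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
#     y = F.max_pool2d(x, kernel_size=2)
#     print(y.numpy())   # [[[[ 5.  7.] [13. 15.]]]]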

def avg_pool2d(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    stride: Optional[Union[int, Tuple[int, int]]] = None,
    padding: Union[int, Tuple[int, int]] = 0,
    mode: str = "average_count_exclude_padding",
) -> Tensor:
    """
    Applies 2D average pooling over an input tensor.

    Refer to :class:`~.AvgPool2d` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param stride: stride of the window. If not provided, its value is set to ``kernel_size``.
        Default: None
    :param padding: implicit zero padding added on both sides. Default: 0
    :param mode: whether to include padding values in the average; set to
        "average" to count them. Default: "average_count_exclude_padding"
    :return: output tensor.
    """
    if stride is None:
        stride = kernel_size
    window_h, window_w = _pair_nonzero(kernel_size)
    stride_h, stride_w = _pair_nonzero(stride)
    padding_h, padding_w = _pair(padding)

    op = builtin.Pooling(
        window_h=window_h,
        window_w=window_w,
        stride_h=stride_h,
        stride_w=stride_w,
        pad_h=padding_h,
        pad_w=padding_w,
        mode=mode,
    )
    (output,) = apply(op, inp)
    return output

def adaptive_max_pool2d(
    inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
    """
    Applies a 2D max adaptive pooling over an input.

    Refer to :class:`~.MaxAdaptivePool2d` for more information.

    :param inp: input tensor.
    :param oshp: `(OH, OW)` size of the output shape.
    :return: output tensor.
    """
    if isinstance(oshp, int):
        oshp = (oshp, oshp)

    op = builtin.AdaptivePooling(mode="max", format="NCHW")
    oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
    (output,) = apply(op, inp, oshp)
    return output

def adaptive_avg_pool2d(
    inp: Tensor, oshp: Union[Tuple[int, int], int, Tensor],
) -> Tensor:
    """
    Applies a 2D average adaptive pooling over an input.

    Refer to :class:`~.AvgAdaptivePool2d` for more information.

    :param inp: input tensor.
    :param oshp: `(OH, OW)` size of the output shape.
    :return: output tensor.
    """
    if isinstance(oshp, int):
        oshp = (oshp, oshp)

    op = builtin.AdaptivePooling(mode="average", format="NCHW")
    oshp = astensor1d(oshp, inp, dtype="int32", device=inp.device)
    (output,) = apply(op, inp, oshp)
    return output
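
# Usage sketch (illustrative, not from the original module): pooling a 4x4
# map down to (2, 2) averages each 2x2 block:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.arange(16, dtype=np.float32).reshape(1, 1, 4, 4))
#     y = F.adaptive_avg_pool2d(x, (2, 2))
#     print(y.numpy())   # [[[[ 2.5  4.5] [10.5 12.5]]]]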

def deformable_psroi_pooling(
    inp: Tensor,
    rois: Tensor,
    trans: Tensor,
    no_trans: bool,
    part_size: int,
    pooled_h: int,
    pooled_w: int,
    sample_per_part: int,
    spatial_scale: float,
    trans_std: float = 0.1,
):
    """
    Deformable PSROI (Position Sensitive Region of Interest) Pooling.

    :param inp: input feature map.
    :param rois: the rois for feature pooling.
    :param trans: input offset to psroi_pooling.
    :param no_trans: selects the phase of DeformablePSROIPooling: False for the
        1st phase, True for the 2nd phase.
    :param part_size: part size.
    :param pooled_h: height of the pooled output.
    :param pooled_w: width of the pooled output.
    :param sample_per_part: sample points of each part.
    :param spatial_scale: the spatial scale w.r.t. the input image.
    :param trans_std: multiplier used in the 2nd phase.
    """
    op = builtin.DeformablePSROIPooling(
        no_trans=no_trans,
        part_size=part_size,
        pooled_h=pooled_h,
        pooled_w=pooled_w,
        sample_per_part=sample_per_part,
        spatial_scale=spatial_scale,
        trans_std=trans_std,
    )
    output, _ = apply(op, inp, rois, trans)
    return output

def hswish(x):
    """
    Element-wise `x * relu6(x + 3) / 6`.

    :param x: input tensor.
    :return: computed tensor.

    Example:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(5).astype(np.float32))
        out = F.hswish(x)
        print(out.numpy().round(decimals=4))

    .. testoutput::

        [0.     0.6667 1.6667 3.     4.    ]
    """
    return _elwise(x, mode=Elemwise.Mode.H_SWISH)


def sigmoid(x):
    """Element-wise `1 / ( 1 + exp( -x ) )`."""
    return _elwise(x, mode=Elemwise.Mode.SIGMOID)


def hsigmoid(x):
    """Element-wise `relu6(x + 3) / 6`."""
    return relu6(x + 3) / 6


def relu(x):
    """Element-wise `max(x, 0)`."""
    return _elwise(x, mode=Elemwise.Mode.RELU)


def relu6(x):
    """Element-wise `min(max(x, 0), 6)`."""
    return minimum(maximum(x, 0), 6)
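
# Usage sketch (illustrative, not from the original module): relu6 clamps
# values into [0, 6]:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.arange(-2, 9, dtype=np.float32))
#     print(F.relu6(x).numpy())   # [0. 0. 0. 1. 2. 3. 4. 5. 6. 6. 6.]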

def prelu(inp: Tensor, weight: Tensor) -> Tensor:
    r"""
    Applies the element-wise PReLU function.

    Refer to :class:`~.PReLU` for more information.
    """
    return maximum(inp, 0) + weight * minimum(inp, 0)


def leaky_relu(inp: Tensor, negative_slope: float = 0.01) -> Tensor:
    r"""
    Applies the element-wise leaky_relu function.

    Refer to :class:`~.LeakyReLU` for more information.
    """
    return maximum(inp, 0) + negative_slope * minimum(inp, 0)


def softplus(inp: Tensor) -> Tensor:
    r"""
    Applies the element-wise function:

    .. math::

        \text{softplus}(x) = \log(1 + \exp(x))

    softplus is a smooth approximation to the ReLU function and can be used
    to constrain the output to be always positive.
    For numerical stability the implementation follows this transformation:

    .. math::

        \text{softplus}(x) = \log(1 + \exp(x))
                           = \log(1 + \exp(-\text{abs}(x))) + \max(x, 0)
                           = \log1p(\exp(-\text{abs}(x))) + \text{relu}(x)

    :param inp: input tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-3, 3, dtype=np.float32))
        y = F.softplus(x)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [0.0486 0.1269 0.3133 0.6931 1.3133 2.1269]
    """
    return log1p(exp(-abs(inp))) + relu(inp)

def logsoftmax(inp: Tensor, axis: Union[int, Sequence[int]]) -> Tensor:
    r"""
    Applies the :math:`\log(\text{softmax}(x))` function to an n-dimensional
    input tensor. The :math:`\text{logsoftmax}(x)` formulation can be simplified as:

    .. math::

        \text{logsoftmax}(x_{i}) = \log(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} )

    For numerical stability the implementation follows this transformation:

    .. math::

        \text{logsoftmax}(x)
        = \log (\frac{\exp (x)}{\sum_{i}(\exp (x_{i}))})
        = x - \log (\sum_{i}(\exp (x_{i})))
        = x - \text{logsumexp}(x)

    :param inp: input tensor.
    :param axis: axis along which :math:`\text{logsoftmax}(x)` will be applied.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsoftmax(x, axis=1)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [[-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]
         [-4.4519 -3.4519 -2.4519 -1.4519 -0.4519]]
    """
    return inp - logsumexp(inp, axis, keepdims=True)

def logsigmoid(inp: Tensor) -> Tensor:
    r"""
    Applies the element-wise function:

    .. math::

        \text{logsigmoid}(x) = \log(\frac{ 1 }{ 1 + \exp(-x)})
        = \log(1/(1 + \exp(-x)))
        = - \log(1 + \exp(-x))
        = - \text{softplus}(-x)

    :param inp: input tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32))
        y = F.logsigmoid(x)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [-5.0067 -4.0182 -3.0486 -2.1269 -1.3133 -0.6931 -0.3133 -0.1269 -0.0486
         -0.0181]
    """
    return -softplus(-inp)

def logsumexp(
    inp: Tensor, axis: Union[int, Sequence[int]], keepdims: bool = False
) -> Tensor:
    r"""
    Calculates the logarithm of the inputs' exponential sum along the given :attr:`axis`.

    .. math::

        \text{logsumexp}(x) = \log \sum_{j=1}^{n} \exp \left(x_{j}\right)

    For numerical stability, the implementation follows this transformation:

    .. math::

        \text{logsumexp}(x) = b + \log \sum_{j=1}^{n} \exp \left(x_{j}-b\right)

    where

    .. math::

        b = \max(x_j)

    :param inp: input tensor.
    :param axis: axis over which the sum is taken. It could be single axis or list of axes.
    :param keepdims: whether to retain :attr:`axis` or not for the output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        y = F.logsumexp(x, axis=1, keepdims=False)
        print(y.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [-0.5481  4.4519]
    """
    max_value = max(inp.detach(), axis, keepdims=True)
    if keepdims:
        return max_value + log(sum(exp(inp - max_value), axis, keepdims))
    else:
        return squeeze(max_value, axis=None) + log(
            sum(exp(inp - max_value), axis, keepdims)
        )

def _get_softmax_axis(ndim: int) -> int:
    if ndim in (0, 1, 3):
        return 0
    return 1


def softmax(inp: Tensor, axis: Optional[int] = None) -> Tensor:
    r"""
    Applies a :math:`\text{softmax}(x)` function. :math:`\text{softmax}(x)` is defined as:

    .. math::

        \text{softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}

    It is applied to all elements along axis, and rescales elements so that
    they stay in the range `[0, 1]` and sum to 1.

    See :class:`~megengine.module.activation.Softmax` for more details.

    :param inp: input tensor.
    :param axis: an axis along which :math:`\text{softmax}(x)` will be applied. By default,
        :math:`\text{softmax}(x)` will apply along the highest ranked axis.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(-5, 5, dtype=np.float32)).reshape(2,5)
        out = F.softmax(x)
        print(out.numpy().round(decimals=4))

    Outputs:

    .. testoutput::

        [[0.0117 0.0317 0.0861 0.2341 0.6364]
         [0.0117 0.0317 0.0861 0.2341 0.6364]]
    """
    if axis is None:
        axis = _get_softmax_axis(len(inp.shape))
    offset = inp.max(axis=axis, keepdims=True).detach()
    cached = exp(inp - offset)
    down = sum(cached, axis=axis, keepdims=True)
    return cached / down

def batch_norm(
    inp: Tensor,
    running_mean: Tensor = None,
    running_var: Tensor = None,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    *,
    training: bool = False,
    momentum: float = 0.9,
    eps: float = 1e-5,
    inplace: bool = True,
    compute_mode="default",
):
    r"""
    Applies batch normalization to the input.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`.
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`.
    :param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: False
    :param momentum: value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5
    :param inplace: whether to update ``running_mean`` and ``running_var``
        inplace or return new tensors. Default: True
    :return: output tensor.
    """
    if inp.ndim != 4:
        raise NotImplementedError("batch_norm for ndim != 4")

    C = inp.shape[1]

    def make_full_if_none(x, value):
        if x is None:
            (x,) = Const(value, dtype=inp.dtype, device=inp.device)()
            shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
            (result,) = apply(builtin.Broadcast(), x, shape)
            return result
        elif x.ndim == 1:
            shape = astensor1d((1, C, 1, 1), inp, dtype="int32", device=inp.device)
            (result,) = apply(builtin.Reshape(), x, shape)
            return result
        return x

    has_mean = running_mean is not None
    has_var = running_var is not None

    if not training:
        assert has_mean, "running_mean must be provided in inference mode"
        assert has_var, "running_var must be provided in inference mode"

    if has_mean and running_mean.ndim != 4:
        raise ValueError
    if has_var and running_var.ndim != 4:
        raise ValueError

    if amp._enabled:
        inp = inp.astype("float16")
        weight, bias, running_mean, running_var = cast_tensors(
            weight, bias, running_mean, running_var, promote=True
        )
    elif compute_mode != "float32":
        inp, weight, bias, running_mean, running_var = convert_inputs(
            inp, weight, bias, running_mean, running_var
        )

    weight = make_full_if_none(weight, 1)
    bias = make_full_if_none(bias, 0)

    if not training:
        op = builtin.BatchNorm(
            fwd_mode=BatchNorm.FwdMode.INFERENCE, epsilon=eps, param_dim="dim_1c11"
        )
        ret = apply(op, inp, weight, bias, running_mean, running_var)[-1]
        return ret
    else:
        op = builtin.BatchNorm(
            avg_factor=1 - momentum, epsilon=eps, param_dim="dim_1c11"
        )
        if has_mean or has_var:
            running_mean = make_full_if_none(running_mean, 0)
            running_var = make_full_if_none(running_var, 1)
            new_mean, new_var, _, _, inp = apply(
                op, inp, weight, bias, running_mean, running_var
            )
            if not has_mean:
                new_mean = None
            if not has_var:
                new_var = None

            if inplace:
                if has_mean:
                    running_mean[...] = new_mean
                if has_var:
                    running_var[...] = new_var
                return inp
            else:
                return inp, new_mean, new_var
        else:
            (_, _, inp,) = apply(op, inp, weight, bias)
            return inp
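
# Usage sketch (illustrative, not from the original module): in training mode
# with no running statistics, batch_norm normalizes over the (N, H, W) axes
# and returns a tensor of the input's shape:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.random.randn(2, 3, 4, 4).astype(np.float32))
#     y = F.batch_norm(x, training=True)
#     print(y.shape)   # (2, 3, 4, 4)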

def sync_batch_norm(
    inp: Tensor,
    running_mean: Tensor,
    running_var: Tensor,
    weight: Optional[Tensor] = None,
    bias: Optional[Tensor] = None,
    training: bool = False,
    momentum: Union[float, Tensor] = 0.9,
    eps: float = 1e-5,
    eps_mode="additive",
    group=WORLD,
) -> Tensor:
    r"""
    Applies synchronized batch normalization to the input.

    Refer to :class:`~.BatchNorm2d` and :class:`~.BatchNorm1d` for more information.

    :param inp: input tensor.
    :param running_mean: tensor to store running mean.
    :param running_var: tensor to store running variance.
    :param weight: scaling tensor in the learnable affine parameters.
        See :math:`\gamma` in :class:`~.BatchNorm2d`.
    :param bias: bias tensor in the learnable affine parameters.
        See :math:`\beta` in :class:`~.BatchNorm2d`.
    :param training: a boolean value to indicate whether batch norm is performed
        in training mode. Default: False
    :param momentum: value used for the ``running_mean`` and ``running_var``
        computation. Default: 0.9
    :param eps: a value added to the denominator for numerical stability.
        Default: 1e-5
    :param eps_mode: mode of calculation for eps, "max" or "additive".
        Default: "additive"
    :param group: communication group, calculate mean and variance within this group.
        Default: :obj:`~megengine.distributed.WORLD`
    :return: output tensor.
    """
    assert eps_mode.lower() in {"max", "additive"}, "unknown eps_mode: {}".format(
        eps_mode
    )
    _channels = inp.shape[1]
    _ndim = inp.ndim
    _device = inp.device
    _dtype = inp.dtype
    _param_shape = (1, _channels) + (1,) * (_ndim - 2)
    _reduce_axis = [0] + [i for i in range(2, _ndim)]

    if training:

        def _sum_on_channel(inp):
            return inp.sum(axis=_reduce_axis, keepdims=True)

        reduce_size = inp.shape[0]
        for i in range(2, _ndim):
            reduce_size = reduce_size * inp.shape[i]
        channel_x1s = _sum_on_channel(inp)
        channel_x2s = _sum_on_channel(inp ** 2)

        if is_distributed():
            # reduce all nodes' data to calculate mean and variance
            reduce_size = broadcast_to(
                Tensor(reduce_size).astype(dtype=_dtype), [1] * _ndim
            )
            stat = concat([reduce_size, channel_x1s, channel_x2s], axis=1)
            stat = all_reduce_sum(stat, group)
            reduce_size = stat[:, :1].reshape(1)
            channel_x1s = stat[:, 1 : 1 + _channels]
            channel_x2s = stat[:, 1 + _channels :]

        channel_mean = channel_x1s / reduce_size
        channel_variance = (
            channel_x1s ** 2 / (-reduce_size * reduce_size) + channel_x2s / reduce_size
        )
    else:
        assert running_var is not None and running_mean is not None
        channel_variance = running_var.reshape(*_param_shape)
        channel_mean = running_mean.reshape(*_param_shape)

    invsqrt_channel_variance = (
        maximum(channel_variance, eps) if eps_mode == "max" else channel_variance + eps
    ) ** -0.5

    if weight is not None:
        weight = weight.reshape(*_param_shape)
    if bias is not None:
        bias = bias.reshape(*_param_shape)

    # outvar = output * weight + bias
    # where output = inp * invsqrt_channel_variance + (
    #     -channel_mean * invsqrt_channel_variance
    # )
    # Manually expand output for gopt
    if weight is not None:
        inv_var_wt = invsqrt_channel_variance * weight
        neg_channel_mean = -channel_mean
        if bias is not None:
            outvar = inp * inv_var_wt + (neg_channel_mean * inv_var_wt + bias)
        else:
            outvar = inp * inv_var_wt + neg_channel_mean * inv_var_wt
    else:
        outvar = inp * invsqrt_channel_variance + (
            -channel_mean * invsqrt_channel_variance
        )
        if bias is not None:
            outvar = outvar + bias

    if training and running_var is not None and running_mean is not None:
        running_mean *= momentum
        running_mean += (1 - momentum) * channel_mean
        channel_variance_unbiased = channel_x1s ** 2 / (
            -reduce_size * (reduce_size - 1)
        ) + channel_x2s / (reduce_size - 1)
        running_var *= momentum
        running_var += (1 - momentum) * channel_variance_unbiased

    return outvar

def dropout(inp: Tensor, drop_prob: float, training: bool = True) -> Tensor:
    """
    Returns a new tensor where each of the elements are randomly set to zero
    with probability P = ``drop_prob``. Optionally rescale the output tensor if ``training`` is True.

    :param inp: input tensor.
    :param drop_prob: probability to drop (set to zero) a single element.
    :param training: the default behavior of ``dropout`` during training is to rescale the output,
        then it can be replaced by an :class:`~.Identity` during inference. Default: True
    :return: the output tensor

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.ones(10, dtype=np.float32))
        out = F.dropout(x, 1./3.)
        print(out.numpy())

    Outputs:

    .. testoutput::
        :options: +SKIP

        [1.5 1.5 0. 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
    """
    assert 0 <= drop_prob < 1
    rv = uniform(size=inp.shape)
    mask = rv > drop_prob
    inp *= mask.astype(inp.dtype)
    if training:
        inp *= 1 / (1 - drop_prob)
    return inp

def one_hot(inp: Tensor, num_classes: int) -> Tensor:
    r"""
    Performs one-hot encoding for the input tensor.

    :param inp: input tensor.
    :param num_classes: number of classes denotes the last dimension of the output tensor.
    :return: output tensor.

    Examples:

    .. testcode::

        import numpy as np
        from megengine import tensor
        import megengine.functional as F

        x = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(x, num_classes=4)
        print(out.numpy())

    Outputs:

    .. testoutput::

        [[0 1 0 0]
         [0 0 1 0]
         [0 0 0 1]]
    """
    zeros_tensor = zeros(list(inp.shape) + [num_classes], inp.dtype, inp.device)
    ones_tensor = ones(list(inp.shape) + [1], inp.dtype, inp.device)

    op = builtin.IndexingSetOneHot(axis=inp.ndim)
    (result,) = apply(op, zeros_tensor, inp, ones_tensor)
    return result

def embedding(
    inp: Tensor,
    weight: Tensor,
    padding_idx: Optional[int] = None,
    max_norm: Optional[float] = None,
    norm_type: Optional[float] = None,
):
    """
    Applies lookup table for embedding.

    :param inp: tensor with indices.
    :param weight: learnable weights from which to embed.
    :param padding_idx: should be set to None, not supported now.
    :param max_norm: should be set to None, not supported now.
    :param norm_type: should be set to None, not supported now.
    :return: output tensor.

    Refer to :class:`~.Embedding` for more information.
    """
    if padding_idx is not None:
        raise ValueError("padding_idx is not supported now")
    if max_norm is not None or norm_type is not None:
        raise ValueError("weight normalization is not supported now")
    dest_shp = list(inp.shape) + [weight.shape[-1]]
    return weight[inp.reshape(-1)].reshape(dest_shp)
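
# Usage sketch (illustrative, not from the original module): each index picks
# a row of ``weight``, so index shape (2, 3) with embedding dim 4 gives an
# output of shape (2, 3, 4):
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     weight = tensor(np.random.randn(10, 4).astype(np.float32))
#     idx = tensor(np.array([[0, 1, 2], [3, 4, 5]], dtype=np.int32))
#     out = F.embedding(idx, weight)
#     print(out.shape)   # (2, 3, 4)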

def indexing_one_hot(
    src: Tensor, index: Tensor, axis: int = 1, keepdims=False
) -> Tensor:
    r"""
    One-hot indexing for some axes.

    :param src: input tensor.
    :param index: index tensor.
    :param axis: the axis of ``src`` along which ``index`` selects values. Default: 1
    :param keepdims: whether not to remove the axis in result. Default: False
    :return: output tensor.

    Examples:

    .. testcode::

        import megengine.functional as F
        from megengine import tensor

        src = tensor([[1.0, 2.0]])
        index = tensor([0])
        val = F.indexing_one_hot(src, index)
        print(val.numpy())

    Outputs:

    .. testoutput::

        [1.]
    """
    assert isinstance(src, Tensor), "src must be of Tensor type"
    op = builtin.IndexingOneHot(axis=axis)
    index = convert_single_value(index, dtype="int32", device=src.device)
    (result,) = apply(op, src, index)
    if not keepdims:
        result = squeeze(result, axis)
    return result

def sliding_window(
    inp: Tensor,
    kernel_size: Union[int, Tuple[int, int]],
    padding: Union[int, Tuple[int, int]] = 0,
    stride: Union[int, Tuple[int, int]] = 1,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> Tensor:
    """
    Extracts sliding local blocks from a batched input tensor.

    Refer to :class:`~.SlidingWindow` for more information.

    :param inp: input tensor.
    :param kernel_size: size of the window.
    :param padding: implicit zero padding added on both sides of input. Default: 0
    :param stride: stride of the window. Default: 1
    :param dilation: dilation of the window. Default: 1
    :return: output tensor.
    """
    padding_h, padding_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilation_h, dilation_w = _pair_nonzero(dilation)
    window_h, window_w = _pair_nonzero(kernel_size)

    op = builtin.Images2Neibs(
        pad_h=padding_h,
        pad_w=padding_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilation_h,
        dilate_w=dilation_w,
        window_h=window_h,
        window_w=window_w,
    )
    (output,) = apply(op, inp)
    return output
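
# Usage sketch (illustrative, not from the original module; the output layout
# (N, C, OH, OW, window_h, window_w) is assumed here, matching the docs for
# :class:`~.SlidingWindow`): a 3x3 window over a 6x6 map gives 4x4 positions:
#
#     import numpy as np
#     from megengine import tensor
#     import megengine.functional as F
#
#     x = tensor(np.random.randn(1, 1, 6, 6).astype(np.float32))
#     y = F.sliding_window(x, kernel_size=3)
#     print(y.shape)   # (1, 1, 4, 4, 3, 3)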

def sliding_window_transpose(
    inp: Tensor,
    output_size: Union[int, Tuple[int, int]],
    kernel_size: Union[int, Tuple[int, int]],
    padding: Union[int, Tuple[int, int]] = 0,
    stride: Union[int, Tuple[int, int]] = 1,
    dilation: Union[int, Tuple[int, int]] = 1,
) -> Tensor:
    """
    Sum over the sliding windows on the corresponding input location.

    Refer to :class:`~.SlidingWindowTranspose` for more information.

    :param inp: input tensor.
    :param output_size: shape of output tensor.
    :param kernel_size: size of the window.
    :param padding: implicit zero padding added on both sides of input. Default: 0
    :param stride: stride of the window. Default: 1
    :param dilation: dilation of the window. Default: 1
    :return: output tensor.
    """
    output_h, output_w = _pair_nonzero(output_size)
    padding_h, padding_w = _pair(padding)
    stride_h, stride_w = _pair_nonzero(stride)
    dilation_h, dilation_w = _pair_nonzero(dilation)
    window_h, window_w = _pair_nonzero(kernel_size)

    expected_h = (
        output_h + 2 * padding_h - dilation_h * (window_h - 1) - 1
    ) // stride_h + 1
    expected_w = (
        output_w + 2 * padding_w - dilation_w * (window_w - 1) - 1
    ) // stride_w + 1
    assert inp.ndim == 6, "the input dimension of sliding_window_transpose should be 6"
    assert (
        inp.shape[2] == expected_h and inp.shape[3] == expected_w
    ), "the input shape and output size do not match"

    op = builtin.SlidingWindowTranspose(
        out_h=output_h,
        out_w=output_w,
        pad_h=padding_h,
        pad_w=padding_w,
        stride_h=stride_h,
        stride_w=stride_w,
        dilate_h=dilation_h,
        dilate_w=dilation_w,
        window_h=window_h,
        window_w=window_w,
    )
    (output,) = apply(op, inp)
    return output

interpolate = deprecated_func("1.3", "megengine.functional.vision", "interpolate", True)
roi_pooling = deprecated_func("1.3", "megengine.functional.vision", "roi_pooling", True)
roi_align = deprecated_func("1.3", "megengine.functional.vision", "roi_align", True)
nms = deprecated_func("1.3", "megengine.functional.vision", "nms", True)
resize = deprecated_func("1.3", "megengine.functional.vision", "resize", True)
remap = deprecated_func("1.3", "megengine.functional.vision", "remap", True)
nvof = deprecated_func("1.3", "megengine.functional.vision", "nvof", True)
warp_affine = deprecated_func("1.3", "megengine.functional.vision", "warp_affine", True)
warp_perspective = deprecated_func(
    "1.3", "megengine.functional.vision", "warp_perspective", True
)

from .quantized import conv_bias_activation  # isort:skip
from .loss import *  # isort:skip
