You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

math.py 25 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
#   8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import collections
  10. import math
  11. from typing import Iterable, Optional, Sequence, Tuple, Union
  12. from ..core._imperative_rt.core2 import Const, apply, dtype_promotion
  13. from ..core._imperative_rt.ops import SubgraphBuilder as _SubgraphBuilder
  14. from ..core.ops import builtin
  15. from ..core.tensor.array_method import _matmul
  16. from ..core.tensor.utils import _normalize_axis
  17. from ..tensor import Tensor
  18. from ..utils.deprecation import deprecated_kwargs_default
  19. from .elemwise import clip
  20. from .tensor import expand_dims, squeeze
# Public names exported by ``import *`` from this module.
__all__ = [
    "argmax",
    "argmin",
    "argsort",
    "dot",
    "isinf",
    "isnan",
    "matinv",
    "matmul",
    "max",
    "mean",
    "min",
    "norm",
    "normalize",
    "prod",
    "sign",
    "sort",
    "std",
    "sum",
    "svd",
    "topk",
    "var",
]
  44. def isnan(inp: Tensor) -> Tensor:
  45. r"""Returns a new tensor representing if each element is ``NaN`` or not.
  46. Args:
  47. inp: input tensor.
  48. Returns:
  49. result tensor.
  50. Examples:
  51. .. testcode::
  52. from megengine import tensor
  53. import megengine.functional as F
  54. x = tensor([1, float("nan"), 0])
  55. print(F.isnan(x).numpy())
  56. Outputs:
  57. .. testoutput::
  58. [False True False]
  59. """
  60. return inp != inp
  61. def isinf(inp: Tensor) -> Tensor:
  62. r"""Returns a new tensor representing if each element is ``Inf`` or not.
  63. Args:
  64. inp: input tensor.
  65. Returns:
  66. result tensor.
  67. Examples:
  68. .. testcode::
  69. from megengine import tensor
  70. import megengine.functional as F
  71. x = tensor([1, float("inf"), 0])
  72. print(F.isinf(x).numpy())
  73. Outputs:
  74. .. testoutput::
  75. [False True False]
  76. """
  77. return abs(inp).astype("float32") == float("inf")
  78. def sign(inp: Tensor):
  79. r"""Returns a new tensor representing the sign of each element in input tensor.
  80. Args:
  81. inp: Tensor:
  82. Returns:
  83. the sign of input tensor.
  84. Examples:
  85. .. testcode::
  86. from megengine import tensor
  87. import megengine.functional as F
  88. x = tensor([1, -1, 0])
  89. print(F.sign(x).numpy())
  90. Outputs:
  91. .. testoutput::
  92. [ 1 -1 0]
  93. """
  94. return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)
  95. def sum(
  96. inp: Tensor,
  97. axis: Optional[Union[int, Sequence[int]]] = None,
  98. keepdims: bool = False,
  99. ) -> Tensor:
  100. r"""Returns the sum of input tensor along given axis. If axis is a list of dimensions,
  101. reduce over all of them.
  102. Args:
  103. inp: input tensor.
  104. axis: dimension to reduce. If None, all dimensions will be reduced.
  105. Default: None
  106. keepdims: whether the output tensor has axis retained or not.
  107. Default: False
  108. Returns:
  109. output tensor.
  110. Examples:
  111. .. testcode::
  112. import numpy as np
  113. from megengine import tensor
  114. import megengine.functional as F
  115. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  116. out = F.sum(x)
  117. print(out.numpy())
  118. Outputs:
  119. .. testoutput::
  120. 21
  121. """
  122. return inp.sum(axis=axis, keepdims=keepdims)
  123. def prod(
  124. inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None, keepdims=False
  125. ) -> Tensor:
  126. r"""Returns the product of input tensor along given axis. If axis is a list of dimensions,
  127. reduce over all of them.
  128. Args:
  129. inp: input tensor.
  130. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  131. keepdims: whether the output tensor has axis retained or not. Default: False
  132. Returns:
  133. output tensor.
  134. Examples:
  135. .. testcode::
  136. import numpy as np
  137. from megengine import tensor
  138. import megengine.functional as F
  139. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  140. out = F.prod(x)
  141. print(out.numpy())
  142. Outputs:
  143. .. testoutput::
  144. 720
  145. """
  146. return inp.prod(axis=axis, keepdims=keepdims)
  147. def mean(
  148. inp: Tensor,
  149. axis: Optional[Union[int, Sequence[int]]] = None,
  150. keepdims: bool = False,
  151. ) -> Tensor:
  152. r"""Returns the mean value of input tensor along
  153. given axis. If axis is a list of dimensions,
  154. reduce over all of them.
  155. Args:
  156. inp: input tensor.
  157. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  158. keepdims: whether the output tensor has axis retained or not. Default: False
  159. Returns:
  160. output tensor.
  161. Examples:
  162. .. testcode::
  163. import numpy as np
  164. from megengine import tensor
  165. import megengine.functional as F
  166. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  167. out = F.mean(x)
  168. print(out.numpy())
  169. Outputs:
  170. .. testoutput::
  171. 3.5
  172. """
  173. return inp.mean(axis=axis, keepdims=keepdims)
  174. def var(
  175. inp: Tensor,
  176. axis: Optional[Union[int, Sequence[int]]] = None,
  177. keepdims: bool = False,
  178. ) -> Tensor:
  179. r"""Returns the variance value of input tensor along
  180. given axis. If axis is a list of dimensions,
  181. reduce over all of them.
  182. Args:
  183. inp: input tensor.
  184. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  185. keepdims: whether the output tensor has axis retained or not. Default: False
  186. Returns:
  187. output tensor.
  188. Examples:
  189. .. testcode::
  190. import numpy as np
  191. from megengine import tensor
  192. import megengine.functional as F
  193. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  194. out = F.var(data)
  195. print(out.numpy().round(decimals=4))
  196. Outputs:
  197. .. testoutput::
  198. 2.9167
  199. """
  200. if axis is None:
  201. m = mean(inp, axis=axis, keepdims=False)
  202. else:
  203. m = mean(inp, axis=axis, keepdims=True)
  204. v = inp - m
  205. return mean(v ** 2, axis=axis, keepdims=keepdims)
  206. def std(
  207. inp: Tensor,
  208. axis: Optional[Union[int, Sequence[int]]] = None,
  209. keepdims: bool = False,
  210. ) -> Tensor:
  211. r"""Returns the standard deviation of input tensor along
  212. given axis. If axis is a list of dimensions,
  213. reduce over all of them.
  214. Args:
  215. inp: input tensor.
  216. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  217. keepdims: whether the output tensor has axis retained or not. Default: False
  218. Returns:
  219. output tensor.
  220. Examples:
  221. .. testcode::
  222. import numpy as np
  223. from megengine import tensor
  224. import megengine.functional as F
  225. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  226. out = F.std(data, axis=1)
  227. print(out.numpy().round(decimals=4))
  228. Outputs:
  229. .. testoutput::
  230. [0.8165 0.8165]
  231. """
  232. return var(inp, axis=axis, keepdims=keepdims) ** 0.5
  233. def min(
  234. inp: Tensor,
  235. axis: Optional[Union[int, Sequence[int]]] = None,
  236. keepdims: bool = False,
  237. ) -> Tensor:
  238. r"""Returns the min value of input tensor along
  239. given axis. If axis is a list of dimensions,
  240. reduce over all of them.
  241. Args:
  242. inp: input tensor.
  243. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  244. keepdims: whether the output tensor has axis retained or not. Default: False
  245. Returns:
  246. output tensor.
  247. Examples:
  248. .. testcode::
  249. import numpy as np
  250. from megengine import tensor
  251. import megengine.functional as F
  252. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  253. out = F.min(x)
  254. print(out.numpy())
  255. Outputs:
  256. .. testoutput::
  257. 1
  258. """
  259. return inp.min(axis=axis, keepdims=keepdims)
  260. def max(
  261. inp: Tensor,
  262. axis: Optional[Union[int, Sequence[int]]] = None,
  263. keepdims: bool = False,
  264. ) -> Tensor:
  265. r"""Returns the max value of the input tensor along
  266. given axis. If axis is a list of dimensions,
  267. reduce over all of them.
  268. Args:
  269. inp: input tensor.
  270. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  271. keepdims: whether the output tensor has axis retained or not. Default: False
  272. Returns:
  273. output tensor.
  274. Examples:
  275. .. testcode::
  276. import numpy as np
  277. from megengine import tensor
  278. import megengine.functional as F
  279. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  280. out = F.max(x)
  281. print(out.numpy())
  282. Outputs:
  283. .. testoutput::
  284. 6
  285. """
  286. return inp.max(axis=axis, keepdims=keepdims)
  287. def norm(
  288. inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
  289. ):
  290. r"""Calculates ``p``-norm of input tensor along
  291. given axis.
  292. Args:
  293. inp: input tensor.
  294. ord: power of value applied to inp. Default: 2
  295. axis: dimension to reduce. If None, input must be a vector. Default: None
  296. keepdims: whether the output tensor has axis retained or not. Default: False
  297. Returns:
  298. output tensor.
  299. Examples:
  300. .. testcode::
  301. import numpy as np
  302. from megengine import tensor
  303. import megengine.functional as F
  304. x = tensor(np.arange(-3, 3, dtype=np.float32))
  305. out = F.norm(x)
  306. print(out.numpy().round(decimals=4))
  307. Outputs:
  308. .. testoutput::
  309. 4.3589
  310. """
  311. if axis is None:
  312. if inp.ndim != 1:
  313. raise TypeError("axis is required unless input is a vector")
  314. if ord is None:
  315. ord = 2
  316. if ord == 0:
  317. return sum(inp != 0, axis=axis, keepdims=keepdims)
  318. if ord == math.inf:
  319. return max(abs(inp))
  320. if ord == -math.inf:
  321. return min(abs(inp))
  322. return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
  323. def argmin(
  324. inp: Tensor,
  325. axis: Optional[Union[int, Sequence[int]]] = None,
  326. keepdims: bool = False,
  327. ) -> Tensor:
  328. r"""Returns the indices of the minimum values along
  329. given axis. If axis is a list of dimensions,
  330. reduce over all of them.
  331. Args:
  332. inp: input tensor.
  333. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  334. keepdims: whether the output tensor has axis retained or not. Default: False
  335. Returns:
  336. output tensor.
  337. Examples:
  338. .. testcode::
  339. import numpy as np
  340. from megengine import tensor
  341. import megengine.functional as F
  342. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  343. out = F.argmin(x)
  344. print(out.numpy())
  345. Outputs:
  346. .. testoutput::
  347. 0
  348. """
  349. if axis is None:
  350. assert not keepdims, "can not set axis=None and keepdims=True"
  351. inp = inp.flatten()
  352. axis = 0
  353. axis = _normalize_axis(inp.ndim, axis, reverse=True)
  354. if isinstance(axis, collections.abc.Iterable):
  355. for ai in axis:
  356. op = builtin.Argmin(axis=ai)
  357. (inp,) = apply(op, inp)
  358. if not keepdims:
  359. inp = squeeze(inp, ai)
  360. return inp
  361. op = builtin.Argmin(axis=axis)
  362. (result,) = apply(op, inp)
  363. if not keepdims:
  364. result = squeeze(result, axis)
  365. return result
  366. def argmax(
  367. inp: Tensor,
  368. axis: Optional[Union[int, Sequence[int]]] = None,
  369. keepdims: bool = False,
  370. ) -> Tensor:
  371. r"""Returns the indices of the maximum values along
  372. given axis. If axis is a list of dimensions,
  373. reduce over all of them.
  374. Args:
  375. inp: input tensor.
  376. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  377. keepdims: whether the output tensor has axis retained or not. Default: False
  378. Returns:
  379. output tensor.
  380. Examples:
  381. .. testcode::
  382. import numpy as np
  383. from megengine import tensor
  384. import megengine.functional as F
  385. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  386. out = F.argmax(x)
  387. print(out.numpy())
  388. Outputs:
  389. .. testoutput::
  390. 5
  391. """
  392. if axis is None:
  393. assert not keepdims, "can not set axis=None and keepdims=True"
  394. inp = inp.flatten()
  395. axis = 0
  396. axis = _normalize_axis(inp.ndim, axis, reverse=True)
  397. if isinstance(axis, collections.abc.Iterable):
  398. for ai in axis:
  399. op = builtin.Argmax(axis=ai)
  400. (inp,) = apply(op, inp)
  401. if not keepdims:
  402. inp = squeeze(inp, ai)
  403. return inp
  404. op = builtin.Argmax(axis=axis)
  405. (result,) = apply(op, inp)
  406. if not keepdims:
  407. result = squeeze(result, axis)
  408. return result
  409. def normalize(
  410. inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
  411. ) -> Tensor:
  412. r"""Performs :math:`L_p` normalization of input tensor along
  413. given axis.
  414. For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
  415. :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
  416. .. math::
  417. v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
  418. Args:
  419. inp: input tensor.
  420. ord: power of value applied to input tensor. Default: 2
  421. axis: dimension to reduce.If None, input must be a vector. Default: None
  422. eps: a small value to avoid division by zero. Default: 1e-12
  423. Returns:
  424. normalized output tensor.
  425. """
  426. if axis is None:
  427. return inp / clip(norm(inp, ord, axis), lower=eps)
  428. else:
  429. return inp / clip(norm(inp, ord, axis, keepdims=True), lower=eps)
  430. def argsort(inp: Tensor, descending: bool = False) -> Tensor:
  431. r"""Returns the indices that would sort the input tensor.
  432. Args:
  433. inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
  434. descending: sort in descending order, where the largest comes first. Default: False
  435. inp: Tensor:
  436. descending: bool:
  437. Returns:
  438. indices of int32 indicates how to sort the input.
  439. Examples:
  440. .. testcode::
  441. import numpy as np
  442. from megengine import tensor
  443. import megengine.functional as F
  444. x = tensor(np.array([1,2], dtype=np.float32))
  445. indices = F.argsort(x)
  446. print(indices.numpy())
  447. Outputs:
  448. .. testoutput::
  449. [0 1]
  450. """
  451. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  452. if descending:
  453. order = "descending"
  454. else:
  455. order = "ascending"
  456. op = builtin.Argsort(order=order)
  457. if len(inp.shape) == 1:
  458. inp = inp.reshape(1, -1)
  459. _, result = apply(op, inp)
  460. return result[0]
  461. _, result = apply(op, inp)
  462. return result
  463. def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
  464. r"""Returns sorted tensor and the indices would sort the input tensor.
  465. Args:
  466. inp: input tensor. If it's 2d, the result would be sorted by row.
  467. descending: sort in descending order, where the largest comes first. Default: False
  468. Returns:
  469. tuple of two tensors `(sorted_tensor, indices_of_int32)`.
  470. Examples:
  471. .. testcode::
  472. import numpy as np
  473. from megengine import tensor
  474. import megengine.functional as F
  475. x = tensor(np.array([1,2], dtype=np.float32))
  476. out, indices = F.sort(x)
  477. print(out.numpy())
  478. Outputs:
  479. .. testoutput::
  480. [1. 2.]
  481. """
  482. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  483. if descending:
  484. order = "descending"
  485. else:
  486. order = "ascending"
  487. op = builtin.Argsort(order=order)
  488. if len(inp.shape) == 1:
  489. inp = inp.reshape(1, -1)
  490. tns, ind = apply(op, inp)
  491. return tns[0], ind[0]
  492. tns, ind = apply(op, inp)
  493. return tns, ind
@deprecated_kwargs_default("1.12", "descending", 3)
def topk(
    inp: Tensor,
    k: int,
    descending: bool = False,
    kth_only: bool = False,
    no_sort: bool = False,
) -> Tuple[Tensor, Tensor]:
    r"""Selects the ``Top-K`` (by default) smallest elements of 2d matrix by row.

    Args:
        inp: input tensor. If input tensor is 2d, each row will be sorted.
        k: number of elements needed.
        descending: if True, return the largest elements instead. Default: False
        kth_only: if True, only the k-th element will be returned. Default: False
        no_sort: if True, the returned elements can be unordered. Default: False

    Returns:
        tuple of two tensors ``(topk_tensor, indices_of_int32)``

    Examples:
        .. testcode::

            import numpy as np
            from megengine import tensor
            import megengine.functional as F

            x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
            top, indices = F.topk(x, 5, descending=False)
            print(top.numpy(), indices.numpy())

        Outputs:

        .. testoutput::

            [1. 2. 3. 4. 5.] [7 0 6 1 5]
    """
    # NOTE(review): a negated k appears to be how the TopK kernel is told to
    # pick the largest elements instead of the smallest — confirm against the
    # builtin.TopK implementation.
    if descending:
        k = -k
    # Select the kernel mode: k-th value only, unordered values+indices, or
    # fully sorted values+indices.
    if kth_only:
        mode = "kth_only"
    elif no_sort:
        mode = "value_idx_nosort"
    else:
        mode = "value_idx_sorted"
    op = builtin.TopK(mode=mode)

    # The op takes k as a tensor operand; wrap plain ints as an int32 const
    # on the same device as the input.
    if not isinstance(k, Tensor):
        k = Const(k, "int32", inp.device, None)

    if len(inp.shape) == 1:
        # 1d input: promote to a single-row 2d tensor for the kernel, then
        # squeeze the leading dimension back out of the outputs.
        if kth_only:
            (tns,) = apply(op, expand_dims(inp, 0), k)
            # FIXME:
            # could use a dedicated kernel
            # gradient may be routed to other indices if k-th value is not unique
            # (kth_only mode returns no indices, so the index is recovered by
            # matching the k-th value back against the input.)
            ind = argmax((tns == inp).astype("int8"))
            tns = squeeze(tns, 0)
        else:
            tns, ind = apply(op, expand_dims(inp, 0), k)
            tns = squeeze(tns, 0)
            ind = squeeze(ind, 0)
    else:
        if kth_only:
            (tns,) = apply(op, inp, k)
            # FIXME: same as above
            ind = argmax((expand_dims(tns, 1) == inp).astype("int8"), 1)
        else:
            tns, ind = apply(op, inp, k)

    return tns, ind
  554. def matinv(inp: Tensor) -> Tensor:
  555. r"""Computes the inverse of a batch of matrices; input must has shape [..., n, n].
  556. Args:
  557. inp: input tensor.
  558. Returns:
  559. output tensor.
  560. Examples:
  561. .. testcode::
  562. import numpy as np
  563. from megengine import tensor
  564. import megengine.functional as F
  565. data = tensor([[1.0, 0.0], [1.0, 1.0]])
  566. out = F.matinv(data)
  567. print(out.numpy())
  568. Outputs:
  569. .. testoutput::
  570. [[ 1. 0.]
  571. [-1. 1.]]
  572. """
  573. (result,) = apply(builtin.MatrixInverse(), inp)
  574. return result
  575. def matmul(
  576. inp1: Tensor,
  577. inp2: Tensor,
  578. transpose_a=False,
  579. transpose_b=False,
  580. compute_mode="default",
  581. format="default",
  582. ) -> Tensor:
  583. r"""Performs a matrix multiplication of the matrices ``inp1`` and ``inp2``.
  584. With different inputs dim, this function behaves differently:
  585. * Both 1-D tensor, simply forward to ``dot``.
  586. * Both 2-D tensor, normal matrix multiplication.
  587. * If one input tensor is 1-D, matrix vector multiplication.
  588. * If at least one tensor are 3-dimensional or >3-dimensional, the other tensor should have dim >= 2,
  589. the batched matrix-matrix is returned, and the tensor with smaller dimension will be broadcasted.
  590. For example:
  591. * inp1: `(n, k, m)`, inp2: `(n, m, p)`, return: `(n, k, p)`
  592. * inp1: `(n, k, m)`, inp2: `(m, p)`, return: `(n, k, p)`
  593. * inp1: `(n, j, k, m)`, inp2: `(n, j, m, p)`, return: `(n, j, k, p)`
  594. Args:
  595. inp1: first matrix to be multiplied.
  596. inp2: second matrix to be multiplied.
  597. Returns:
  598. output tensor.
  599. Examples:
  600. .. testcode::
  601. import numpy as np
  602. from megengine import tensor
  603. import megengine.functional as F
  604. data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
  605. data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
  606. out = F.matmul(data1, data2)
  607. print(out.numpy())
  608. Outputs:
  609. .. testoutput::
  610. [[10. 13.]
  611. [28. 40.]]
  612. """
  613. return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode, format)
  614. def dot(inp1: Tensor, inp2: Tensor) -> Tensor:
  615. r"""Computes dot-product of two vectors ``inp1`` and ``inp2``.
  616. inputs must be 1-dimensional or scalar. A scalar input is automatically broadcasted.
  617. Refer to :func:`~.matmul` for more general usage.
  618. Args:
  619. inp1: first vector.
  620. inp2: second vector.
  621. Returns:
  622. output value.
  623. Examples:
  624. .. testcode::
  625. import numpy as np
  626. from megengine import tensor
  627. import megengine.functional as F
  628. data1 = tensor(np.arange(0, 6, dtype=np.float32))
  629. data2 = tensor(np.arange(0, 6, dtype=np.float32))
  630. out = F.dot(data1, data2)
  631. print(out.numpy())
  632. Outputs:
  633. .. testoutput::
  634. 55.
  635. """
  636. op = builtin.Dot()
  637. assert (
  638. inp1.ndim <= 1 and inp2.ndim <= 1
  639. ), "Input tensors for dot must be 1-dimensional or scalar"
  640. (result,) = apply(op, inp1, inp2)
  641. return result
  642. def svd(inp: Tensor, full_matrices=False, compute_uv=True) -> Tensor:
  643. r"""Returns a singular value decomposition ``A = USVh`` of a matrix (or a stack of matrices) ``x`` , where ``U`` is a matrix (or a stack of matrices) with orthonormal columns, ``S`` is a vector of non-negative numbers (or stack of vectors), and ``Vh`` is a matrix (or a stack of matrices) with orthonormal rows.
  644. Args:
  645. x (Tensor): A input real tensor having the shape ``(..., M, N)`` with ``x.ndim >= 2`` .
  646. full_matrices (bool, optional): If ``False`` , ``U`` and ``Vh`` have the shapes ``(..., M, K)`` and ``(..., K, N)`` , respectively, where ``K = min(M, N)`` . If ``True`` , the shapes are ``(..., M, M)`` and ``(..., N, N)`` , respectively. Default: ``False`` .
  647. compute_uv (bool, optional): Whether or not to compute ``U`` and ``Vh`` in addition to ``S`` . Default: ``True`` .
  648. Note:
  649. * naive does not support ``full_matrices`` and ``compute_uv`` as ``True`` .
  650. Returns:
  651. Returns a tuple ( ``U`` , ``S`` , ``Vh`` ), which are SVD factors ``U`` , ``S``, ``Vh`` of input matrix ``x``. ( ``U`` , ``Vh`` only returned when ``compute_uv`` is True).
  652. ``U`` contains matrices orthonormal columns (i.e., the columns are left singular vectors). If ``full_matrices`` is ``True`` , the array must have shape ``(..., M, M)`` . If ``full_matrices`` is ``False`` , the array must have shape ``(..., M, K)`` , where ``K = min(M, N)`` .
  653. Examples:
  654. >>> import numpy as np
  655. >>> x = Tensor(np.random.randn(9, 6))
  656. >>> y = Tensor(np.random.randn(2, 7, 8, 3))
  657. Reconstruction based on reduced SVD, 2D case:
  658. >>> U, S, Vh = F.svd(x, full_matrices=False)
  659. >>> print(U._tuple_shape, S._tuple_shape, Vh._tuple_shape)
  660. (9, 6) (6,) (6, 6)
  661. Reconsturction based on reduced SVD, 4D case:
  662. >>> u, s, vh = F.svd(y, full_matrices=False)
  663. >>> print(u._tuple_shape, s._tuple_shape, vh._tuple_shape)
  664. (2, 7, 8, 3) (2, 7, 3) (2, 7, 3, 3)
  665. """
  666. op = builtin.SVD(full_matrices=full_matrices, compute_uv=compute_uv)
  667. U, S, Vh = apply(op, inp)
  668. return U, S, Vh
  669. def _check_non_finite(inps: Iterable[Tensor], scale=1.0) -> Tensor:
  670. r"""Check whether input contains infinite or nan value.
  671. Args:
  672. inp: a tensor to be checked.
  673. Returns:
  674. a int32 scalar tensor, 0 for False and 1 for True.
  675. """
  676. op = builtin.CheckNonFinite(scale=scale)
  677. oups = apply(op, *inps)
  678. out = oups[-1]
  679. for i in range(len(inps)):
  680. inps[i]._reset(oups[i])
  681. return out