You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

math.py 25 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import collections
  10. import math
  11. from typing import Iterable, Optional, Sequence, Tuple, Union
  12. from ..core._imperative_rt.core2 import apply, dtype_promotion
  13. from ..core._imperative_rt.ops import SubgraphBuilder as _SubgraphBuilder
  14. from ..core.ops import builtin
  15. from ..core.ops.special import Const
  16. from ..core.tensor.array_method import _matmul
  17. from ..core.tensor.utils import _normalize_axis
  18. from ..tensor import Tensor
  19. from ..utils.deprecation import deprecated_kwargs_default
  20. from .elemwise import clip
  21. from .tensor import expand_dims, squeeze
  22. __all__ = [
  23. "argmax",
  24. "argmin",
  25. "argsort",
  26. "dot",
  27. "isinf",
  28. "isnan",
  29. "matinv",
  30. "matmul",
  31. "max",
  32. "mean",
  33. "min",
  34. "norm",
  35. "normalize",
  36. "prod",
  37. "sign",
  38. "sort",
  39. "std",
  40. "sum",
  41. "svd",
  42. "topk",
  43. "var",
  44. ]
  45. def isnan(inp: Tensor) -> Tensor:
  46. r"""Returns a new tensor representing if each element is ``NaN`` or not.
  47. Args:
  48. inp: input tensor.
  49. Returns:
  50. result tensor.
  51. Examples:
  52. .. testcode::
  53. from megengine import tensor
  54. import megengine.functional as F
  55. x = tensor([1, float("nan"), 0])
  56. print(F.isnan(x).numpy())
  57. Outputs:
  58. .. testoutput::
  59. [False True False]
  60. """
  61. return inp != inp
  62. def isinf(inp: Tensor) -> Tensor:
  63. r"""Returns a new tensor representing if each element is ``Inf`` or not.
  64. Args:
  65. inp: input tensor.
  66. Returns:
  67. result tensor.
  68. Examples:
  69. .. testcode::
  70. from megengine import tensor
  71. import megengine.functional as F
  72. x = tensor([1, float("inf"), 0])
  73. print(F.isinf(x).numpy())
  74. Outputs:
  75. .. testoutput::
  76. [False True False]
  77. """
  78. return abs(inp).astype("float32") == float("inf")
  79. def sign(inp: Tensor):
  80. r"""Returns a new tensor representing the sign of each element in input tensor.
  81. Args:
  82. inp: Tensor:
  83. Returns:
  84. the sign of input tensor.
  85. Examples:
  86. .. testcode::
  87. from megengine import tensor
  88. import megengine.functional as F
  89. x = tensor([1, -1, 0])
  90. print(F.sign(x).numpy())
  91. Outputs:
  92. .. testoutput::
  93. [ 1 -1 0]
  94. """
  95. return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)
  96. def sum(
  97. inp: Tensor,
  98. axis: Optional[Union[int, Sequence[int]]] = None,
  99. keepdims: bool = False,
  100. ) -> Tensor:
  101. r"""Returns the sum of input tensor along given axis. If axis is a list of dimensions,
  102. reduce over all of them.
  103. Args:
  104. inp: input tensor.
  105. axis: dimension to reduce. If None, all dimensions will be reduced.
  106. Default: None
  107. keepdims: whether the output tensor has axis retained or not.
  108. Default: False
  109. Returns:
  110. output tensor.
  111. Examples:
  112. .. testcode::
  113. import numpy as np
  114. from megengine import tensor
  115. import megengine.functional as F
  116. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  117. out = F.sum(x)
  118. print(out.numpy())
  119. Outputs:
  120. .. testoutput::
  121. 21
  122. """
  123. return inp.sum(axis=axis, keepdims=keepdims)
  124. def prod(
  125. inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None, keepdims=False
  126. ) -> Tensor:
  127. r"""Returns the product of input tensor along given axis. If axis is a list of dimensions,
  128. reduce over all of them.
  129. Args:
  130. inp: input tensor.
  131. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  132. keepdims: whether the output tensor has axis retained or not. Default: False
  133. Returns:
  134. output tensor.
  135. Examples:
  136. .. testcode::
  137. import numpy as np
  138. from megengine import tensor
  139. import megengine.functional as F
  140. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  141. out = F.prod(x)
  142. print(out.numpy())
  143. Outputs:
  144. .. testoutput::
  145. 720
  146. """
  147. return inp.prod(axis=axis, keepdims=keepdims)
  148. def mean(
  149. inp: Tensor,
  150. axis: Optional[Union[int, Sequence[int]]] = None,
  151. keepdims: bool = False,
  152. ) -> Tensor:
  153. r"""Returns the mean value of input tensor along
  154. given axis. If axis is a list of dimensions,
  155. reduce over all of them.
  156. Args:
  157. inp: input tensor.
  158. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  159. keepdims: whether the output tensor has axis retained or not. Default: False
  160. Returns:
  161. output tensor.
  162. Examples:
  163. .. testcode::
  164. import numpy as np
  165. from megengine import tensor
  166. import megengine.functional as F
  167. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  168. out = F.mean(x)
  169. print(out.numpy())
  170. Outputs:
  171. .. testoutput::
  172. 3.5
  173. """
  174. return inp.mean(axis=axis, keepdims=keepdims)
  175. def var(
  176. inp: Tensor,
  177. axis: Optional[Union[int, Sequence[int]]] = None,
  178. keepdims: bool = False,
  179. ) -> Tensor:
  180. r"""Returns the variance value of input tensor along
  181. given axis. If axis is a list of dimensions,
  182. reduce over all of them.
  183. Args:
  184. inp: input tensor.
  185. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  186. keepdims: whether the output tensor has axis retained or not. Default: False
  187. Returns:
  188. output tensor.
  189. Examples:
  190. .. testcode::
  191. import numpy as np
  192. from megengine import tensor
  193. import megengine.functional as F
  194. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  195. out = F.var(data)
  196. print(out.numpy().round(decimals=4))
  197. Outputs:
  198. .. testoutput::
  199. 2.9167
  200. """
  201. if axis is None:
  202. m = mean(inp, axis=axis, keepdims=False)
  203. else:
  204. m = mean(inp, axis=axis, keepdims=True)
  205. v = inp - m
  206. return mean(v ** 2, axis=axis, keepdims=keepdims)
  207. def std(
  208. inp: Tensor,
  209. axis: Optional[Union[int, Sequence[int]]] = None,
  210. keepdims: bool = False,
  211. ) -> Tensor:
  212. r"""Returns the standard deviation of input tensor along
  213. given axis. If axis is a list of dimensions,
  214. reduce over all of them.
  215. Args:
  216. inp: input tensor.
  217. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  218. keepdims: whether the output tensor has axis retained or not. Default: False
  219. Returns:
  220. output tensor.
  221. Examples:
  222. .. testcode::
  223. import numpy as np
  224. from megengine import tensor
  225. import megengine.functional as F
  226. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  227. out = F.std(data, axis=1)
  228. print(out.numpy().round(decimals=4))
  229. Outputs:
  230. .. testoutput::
  231. [0.8165 0.8165]
  232. """
  233. return var(inp, axis=axis, keepdims=keepdims) ** 0.5
  234. def min(
  235. inp: Tensor,
  236. axis: Optional[Union[int, Sequence[int]]] = None,
  237. keepdims: bool = False,
  238. ) -> Tensor:
  239. r"""Returns the min value of input tensor along
  240. given axis. If axis is a list of dimensions,
  241. reduce over all of them.
  242. Args:
  243. inp: input tensor.
  244. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  245. keepdims: whether the output tensor has axis retained or not. Default: False
  246. Returns:
  247. output tensor.
  248. Examples:
  249. .. testcode::
  250. import numpy as np
  251. from megengine import tensor
  252. import megengine.functional as F
  253. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  254. out = F.min(x)
  255. print(out.numpy())
  256. Outputs:
  257. .. testoutput::
  258. 1
  259. """
  260. return inp.min(axis=axis, keepdims=keepdims)
  261. def max(
  262. inp: Tensor,
  263. axis: Optional[Union[int, Sequence[int]]] = None,
  264. keepdims: bool = False,
  265. ) -> Tensor:
  266. r"""Returns the max value of the input tensor along
  267. given axis. If axis is a list of dimensions,
  268. reduce over all of them.
  269. Args:
  270. inp: input tensor.
  271. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  272. keepdims: whether the output tensor has axis retained or not. Default: False
  273. Returns:
  274. output tensor.
  275. Examples:
  276. .. testcode::
  277. import numpy as np
  278. from megengine import tensor
  279. import megengine.functional as F
  280. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  281. out = F.max(x)
  282. print(out.numpy())
  283. Outputs:
  284. .. testoutput::
  285. 6
  286. """
  287. return inp.max(axis=axis, keepdims=keepdims)
  288. def norm(
  289. inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
  290. ):
  291. r"""Calculates ``p``-norm of input tensor along
  292. given axis.
  293. Args:
  294. inp: input tensor.
  295. ord: power of value applied to inp. Default: 2
  296. axis: dimension to reduce. If None, input must be a vector. Default: None
  297. keepdims: whether the output tensor has axis retained or not. Default: False
  298. Returns:
  299. output tensor.
  300. Examples:
  301. .. testcode::
  302. import numpy as np
  303. from megengine import tensor
  304. import megengine.functional as F
  305. x = tensor(np.arange(-3, 3, dtype=np.float32))
  306. out = F.norm(x)
  307. print(out.numpy().round(decimals=4))
  308. Outputs:
  309. .. testoutput::
  310. 4.3589
  311. """
  312. if axis is None:
  313. if inp.ndim != 1:
  314. raise TypeError("axis is required unless input is a vector")
  315. if ord is None:
  316. ord = 2
  317. if ord == 0:
  318. return sum(inp != 0, axis=axis, keepdims=keepdims)
  319. if ord == math.inf:
  320. return max(abs(inp))
  321. if ord == -math.inf:
  322. return min(abs(inp))
  323. return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
  324. def argmin(
  325. inp: Tensor,
  326. axis: Optional[Union[int, Sequence[int]]] = None,
  327. keepdims: bool = False,
  328. ) -> Tensor:
  329. r"""Returns the indices of the minimum values along
  330. given axis. If axis is a list of dimensions,
  331. reduce over all of them.
  332. Args:
  333. inp: input tensor.
  334. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  335. keepdims: whether the output tensor has axis retained or not. Default: False
  336. Returns:
  337. output tensor.
  338. Examples:
  339. .. testcode::
  340. import numpy as np
  341. from megengine import tensor
  342. import megengine.functional as F
  343. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  344. out = F.argmin(x)
  345. print(out.numpy())
  346. Outputs:
  347. .. testoutput::
  348. 0
  349. """
  350. if axis is None:
  351. assert not keepdims, "can not set axis=None and keepdims=True"
  352. inp = inp.flatten()
  353. axis = 0
  354. axis = _normalize_axis(inp.ndim, axis, reverse=True)
  355. if isinstance(axis, collections.abc.Iterable):
  356. for ai in axis:
  357. op = builtin.Argmin(axis=ai)
  358. (inp,) = apply(op, inp)
  359. if not keepdims:
  360. inp = squeeze(inp, ai)
  361. return inp
  362. op = builtin.Argmin(axis=axis)
  363. (result,) = apply(op, inp)
  364. if not keepdims:
  365. result = squeeze(result, axis)
  366. return result
  367. def argmax(
  368. inp: Tensor,
  369. axis: Optional[Union[int, Sequence[int]]] = None,
  370. keepdims: bool = False,
  371. ) -> Tensor:
  372. r"""Returns the indices of the maximum values along
  373. given axis. If axis is a list of dimensions,
  374. reduce over all of them.
  375. Args:
  376. inp: input tensor.
  377. axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  378. keepdims: whether the output tensor has axis retained or not. Default: False
  379. Returns:
  380. output tensor.
  381. Examples:
  382. .. testcode::
  383. import numpy as np
  384. from megengine import tensor
  385. import megengine.functional as F
  386. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  387. out = F.argmax(x)
  388. print(out.numpy())
  389. Outputs:
  390. .. testoutput::
  391. 5
  392. """
  393. if axis is None:
  394. assert not keepdims, "can not set axis=None and keepdims=True"
  395. inp = inp.flatten()
  396. axis = 0
  397. axis = _normalize_axis(inp.ndim, axis, reverse=True)
  398. if isinstance(axis, collections.abc.Iterable):
  399. for ai in axis:
  400. op = builtin.Argmax(axis=ai)
  401. (inp,) = apply(op, inp)
  402. if not keepdims:
  403. inp = squeeze(inp, ai)
  404. return inp
  405. op = builtin.Argmax(axis=axis)
  406. (result,) = apply(op, inp)
  407. if not keepdims:
  408. result = squeeze(result, axis)
  409. return result
  410. def normalize(
  411. inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
  412. ) -> Tensor:
  413. r"""Performs :math:`L_p` normalization of input tensor along
  414. given axis.
  415. For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
  416. :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
  417. .. math::
  418. v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
  419. Args:
  420. inp: input tensor.
  421. ord: power of value applied to input tensor. Default: 2
  422. axis: dimension to reduce.If None, input must be a vector. Default: None
  423. eps: a small value to avoid division by zero. Default: 1e-12
  424. Returns:
  425. normalized output tensor.
  426. """
  427. if axis is None:
  428. return inp / clip(norm(inp, ord, axis), lower=eps)
  429. else:
  430. return inp / clip(norm(inp, ord, axis, keepdims=True), lower=eps)
  431. def argsort(inp: Tensor, descending: bool = False) -> Tensor:
  432. r"""Returns the indices that would sort the input tensor.
  433. Args:
  434. inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
  435. descending: sort in descending order, where the largest comes first. Default: False
  436. inp: Tensor:
  437. descending: bool:
  438. Returns:
  439. indices of int32 indicates how to sort the input.
  440. Examples:
  441. .. testcode::
  442. import numpy as np
  443. from megengine import tensor
  444. import megengine.functional as F
  445. x = tensor(np.array([1,2], dtype=np.float32))
  446. indices = F.argsort(x)
  447. print(indices.numpy())
  448. Outputs:
  449. .. testoutput::
  450. [0 1]
  451. """
  452. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  453. if descending:
  454. order = "descending"
  455. else:
  456. order = "ascending"
  457. op = builtin.Argsort(order=order)
  458. if len(inp.shape) == 1:
  459. inp = inp.reshape(1, -1)
  460. _, result = apply(op, inp)
  461. return result[0]
  462. _, result = apply(op, inp)
  463. return result
  464. def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
  465. r"""Returns sorted tensor and the indices would sort the input tensor.
  466. Args:
  467. inp: input tensor. If it's 2d, the result would be sorted by row.
  468. descending: sort in descending order, where the largest comes first. Default: False
  469. Returns:
  470. tuple of two tensors `(sorted_tensor, indices_of_int32)`.
  471. Examples:
  472. .. testcode::
  473. import numpy as np
  474. from megengine import tensor
  475. import megengine.functional as F
  476. x = tensor(np.array([1,2], dtype=np.float32))
  477. out, indices = F.sort(x)
  478. print(out.numpy())
  479. Outputs:
  480. .. testoutput::
  481. [1. 2.]
  482. """
  483. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  484. if descending:
  485. order = "descending"
  486. else:
  487. order = "ascending"
  488. op = builtin.Argsort(order=order)
  489. if len(inp.shape) == 1:
  490. inp = inp.reshape(1, -1)
  491. tns, ind = apply(op, inp)
  492. return tns[0], ind[0]
  493. tns, ind = apply(op, inp)
  494. return tns, ind
  495. @deprecated_kwargs_default("1.12", "descending", 3)
  496. def topk(
  497. inp: Tensor,
  498. k: int,
  499. descending: bool = False,
  500. kth_only: bool = False,
  501. no_sort: bool = False,
  502. ) -> Tuple[Tensor, Tensor]:
  503. r"""Selects the ``Top-K`` (by default) smallest elements of 2d matrix by row.
  504. Args:
  505. inp: input tensor. If input tensor is 2d, each row will be sorted.
  506. k: number of elements needed.
  507. descending: if True, return the largest elements instead. Default: False
  508. kth_only: if True, only the k-th element will be returned. Default: False
  509. no_sort: if True, the returned elements can be unordered. Default: False
  510. Returns:
  511. tuple of two tensors ``(topk_tensor, indices_of_int32)``
  512. Examples:
  513. .. testcode::
  514. import numpy as np
  515. from megengine import tensor
  516. import megengine.functional as F
  517. x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
  518. top, indices = F.topk(x, 5, descending=False)
  519. print(top.numpy(), indices.numpy())
  520. Outputs:
  521. .. testoutput::
  522. [1. 2. 3. 4. 5.] [7 0 6 1 5]
  523. """
  524. if descending:
  525. k = -k
  526. if kth_only:
  527. mode = "kth_only"
  528. elif no_sort:
  529. mode = "value_idx_nosort"
  530. else:
  531. mode = "value_idx_sorted"
  532. op = builtin.TopK(mode=mode)
  533. if not isinstance(k, Tensor):
  534. (k,) = Const(k, dtype="int32", device=inp.device)()
  535. if len(inp.shape) == 1:
  536. if kth_only:
  537. (tns,) = apply(op, expand_dims(inp, 0), k)
  538. # FIXME:
  539. # could use a dedicated kernel
  540. # gradient may be routed to other indices if k-th value is not unique
  541. ind = argmax((tns == inp).astype("int8"))
  542. tns = squeeze(tns, 0)
  543. else:
  544. tns, ind = apply(op, expand_dims(inp, 0), k)
  545. tns = squeeze(tns, 0)
  546. ind = squeeze(ind, 0)
  547. else:
  548. if kth_only:
  549. (tns,) = apply(op, inp, k)
  550. # FIXME: same as above
  551. ind = argmax((expand_dims(tns, 1) == inp).astype("int8"), 1)
  552. else:
  553. tns, ind = apply(op, inp, k)
  554. return tns, ind
  555. def matinv(inp: Tensor) -> Tensor:
  556. r"""Computes the inverse of a batch of matrices; input must has shape [..., n, n].
  557. Args:
  558. inp: input tensor.
  559. Returns:
  560. output tensor.
  561. Examples:
  562. .. testcode::
  563. import numpy as np
  564. from megengine import tensor
  565. import megengine.functional as F
  566. data = tensor([[1.0, 0.0], [1.0, 1.0]])
  567. out = F.matinv(data)
  568. print(out.numpy())
  569. Outputs:
  570. .. testoutput::
  571. [[ 1. 0.]
  572. [-1. 1.]]
  573. """
  574. (result,) = apply(builtin.MatrixInverse(), inp)
  575. return result
  576. def matmul(
  577. inp1: Tensor,
  578. inp2: Tensor,
  579. transpose_a=False,
  580. transpose_b=False,
  581. compute_mode="default",
  582. format="default",
  583. ) -> Tensor:
  584. r"""Performs a matrix multiplication of the matrices ``inp1`` and ``inp2``.
  585. With different inputs dim, this function behaves differently:
  586. * Both 1-D tensor, simply forward to ``dot``.
  587. * Both 2-D tensor, normal matrix multiplication.
  588. * If one input tensor is 1-D, matrix vector multiplication.
  589. * If at least one tensor are 3-dimensional or >3-dimensional, the other tensor should have dim >= 2,
  590. the batched matrix-matrix is returned, and the tensor with smaller dimension will be broadcasted.
  591. For example:
  592. * inp1: `(n, k, m)`, inp2: `(n, m, p)`, return: `(n, k, p)`
  593. * inp1: `(n, k, m)`, inp2: `(m, p)`, return: `(n, k, p)`
  594. * inp1: `(n, j, k, m)`, inp2: `(n, j, m, p)`, return: `(n, j, k, p)`
  595. Args:
  596. inp1: first matrix to be multiplied.
  597. inp2: second matrix to be multiplied.
  598. Returns:
  599. output tensor.
  600. Examples:
  601. .. testcode::
  602. import numpy as np
  603. from megengine import tensor
  604. import megengine.functional as F
  605. data1 = tensor(np.arange(0, 6, dtype=np.float32).reshape(2, 3))
  606. data2 = tensor(np.arange(0, 6, dtype=np.float32).reshape(3, 2))
  607. out = F.matmul(data1, data2)
  608. print(out.numpy())
  609. Outputs:
  610. .. testoutput::
  611. [[10. 13.]
  612. [28. 40.]]
  613. """
  614. return _matmul(inp1, inp2, transpose_a, transpose_b, compute_mode, format)
  615. def dot(inp1: Tensor, inp2: Tensor) -> Tensor:
  616. r"""Computes dot-product of two vectors ``inp1`` and ``inp2``.
  617. inputs must be 1-dimensional or scalar. A scalar input is automatically broadcasted.
  618. Refer to :func:`~.matmul` for more general usage.
  619. Args:
  620. inp1: first vector.
  621. inp2: second vector.
  622. Returns:
  623. output value.
  624. Examples:
  625. .. testcode::
  626. import numpy as np
  627. from megengine import tensor
  628. import megengine.functional as F
  629. data1 = tensor(np.arange(0, 6, dtype=np.float32))
  630. data2 = tensor(np.arange(0, 6, dtype=np.float32))
  631. out = F.dot(data1, data2)
  632. print(out.numpy())
  633. Outputs:
  634. .. testoutput::
  635. 55.
  636. """
  637. op = builtin.Dot()
  638. assert (
  639. inp1.ndim <= 1 and inp2.ndim <= 1
  640. ), "Input tensors for dot must be 1-dimensional or scalar"
  641. (result,) = apply(op, inp1, inp2)
  642. return result
  643. def svd(inp: Tensor, full_matrices=False, compute_uv=True) -> Tensor:
  644. r"""Returns a singular value decomposition ``A = USVh`` of a matrix (or a stack of matrices) ``x`` , where ``U`` is a matrix (or a stack of matrices) with orthonormal columns, ``S`` is a vector of non-negative numbers (or stack of vectors), and ``Vh`` is a matrix (or a stack of matrices) with orthonormal rows.
  645. Args:
  646. x (Tensor): A input real tensor having the shape ``(..., M, N)`` with ``x.ndim >= 2`` .
  647. full_matrices (bool, optional): If ``False`` , ``U`` and ``Vh`` have the shapes ``(..., M, K)`` and ``(..., K, N)`` , respectively, where ``K = min(M, N)`` . If ``True`` , the shapes are ``(..., M, M)`` and ``(..., N, N)`` , respectively. Default: ``False`` .
  648. compute_uv (bool, optional): Whether or not to compute ``U`` and ``Vh`` in addition to ``S`` . Default: ``True`` .
  649. Note:
  650. * naive does not support ``full_matrices`` and ``compute_uv`` as ``True`` .
  651. Returns:
  652. Returns a tuple ( ``U`` , ``S`` , ``Vh`` ), which are SVD factors ``U`` , ``S``, ``Vh`` of input matrix ``x``. ( ``U`` , ``Vh`` only returned when ``compute_uv`` is True).
  653. ``U`` contains matrices orthonormal columns (i.e., the columns are left singular vectors). If ``full_matrices`` is ``True`` , the array must have shape ``(..., M, M)`` . If ``full_matrices`` is ``False`` , the array must have shape ``(..., M, K)`` , where ``K = min(M, N)`` .
  654. Examples:
  655. >>> import numpy as np
  656. >>> x = Tensor(np.random.randn(9, 6))
  657. >>> y = Tensor(np.random.randn(2, 7, 8, 3))
  658. Reconstruction based on reduced SVD, 2D case:
  659. >>> U, S, Vh = F.svd(x, full_matrices=False)
  660. >>> print(U._tuple_shape, S._tuple_shape, Vh._tuple_shape)
  661. (9, 6) (6,) (6, 6)
  662. Reconsturction based on reduced SVD, 4D case:
  663. >>> u, s, vh = F.svd(y, full_matrices=False)
  664. >>> print(u._tuple_shape, s._tuple_shape, vh._tuple_shape)
  665. (2, 7, 8, 3) (2, 7, 3) (2, 7, 3, 3)
  666. """
  667. op = builtin.SVD(full_matrices=full_matrices, compute_uv=compute_uv)
  668. U, S, Vh = apply(op, inp)
  669. return U, S, Vh
  670. def _check_non_finite(inps: Iterable[Tensor], scale=1.0) -> Tensor:
  671. r"""Check whether input contains infinite or nan value.
  672. Args:
  673. inp: a tensor to be checked.
  674. Returns:
  675. a int32 scalar tensor, 0 for False and 1 for True.
  676. """
  677. op = builtin.CheckNonFinite(scale=scale)
  678. oups = apply(op, *inps)
  679. out = oups[-1]
  680. for i in range(len(inps)):
  681. inps[i]._reset(oups[i])
  682. return out