You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

math.py 18 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import collections
  10. import functools
  11. import math
  12. import numbers
  13. from typing import Optional, Sequence, Tuple, Union
  14. from ..core._imperative_rt.core2 import apply
  15. from ..core.ops import builtin
  16. from ..core.ops.special import Const
  17. from ..core.tensor import utils
  18. from ..tensor import Tensor
  19. from .elemwise import clip, exp, log, log1p
  20. from .tensor import reshape, squeeze
# Public names re-exported by ``from megengine.functional import *``.
__all__ = [
    "argmax",
    "argmin",
    "argsort",
    "isinf",
    "isnan",
    "max",
    "mean",
    "min",
    "norm",
    "normalize",
    "prod",
    "sign",
    "sort",
    "std",
    "sum",
    "topk",
    "var",
]
  40. def isnan(inp: Tensor) -> Tensor:
  41. r"""
  42. Returns a new tensor representing if each element is ``NaN`` or not.
  43. :param inp: input tensor.
  44. :return: result tensor.
  45. Examples:
  46. .. testcode::
  47. from megengine import tensor
  48. import megengine.functional as F
  49. x = tensor([1, float("nan"), 0])
  50. print(F.isnan(x).numpy())
  51. Outputs:
  52. .. testoutput::
  53. [False True False]
  54. """
  55. return inp != inp
  56. def isinf(inp: Tensor) -> Tensor:
  57. r"""
  58. Returns a new tensor representing if each element is ``Inf`` or not.
  59. :param inp: input tensor.
  60. :return: result tensor.
  61. Examples:
  62. .. testcode::
  63. from megengine import tensor
  64. import megengine.functional as F
  65. x = tensor([1, float("inf"), 0])
  66. print(F.isinf(x).numpy())
  67. Outputs:
  68. .. testoutput::
  69. [False True False]
  70. """
  71. return abs(inp).astype("float32") == float("inf")
  72. def sign(inp: Tensor):
  73. r"""
  74. Returns a new tensor representing the sign of each element in input tensor.
  75. :param: input tensor.
  76. :return: the sign of input tensor.
  77. Examples:
  78. .. testcode::
  79. from megengine import tensor
  80. import megengine.functional as F
  81. x = tensor([1, -1, 0])
  82. print(F.sign(x).numpy())
  83. Outputs:
  84. .. testoutput::
  85. [ 1 -1 0]
  86. """
  87. return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)
  88. def sum(
  89. inp: Tensor,
  90. axis: Optional[Union[int, Sequence[int]]] = None,
  91. keepdims: bool = False,
  92. ) -> Tensor:
  93. r"""
  94. Returns the sum of input tensor along given axis. If axis is a list of dimensions,
  95. reduce over all of them.
  96. :param inp: input tensor.
  97. :param axis: dimension to reduce. If None, all dimensions will be reduced.
  98. Default: None
  99. :param keepdims: whether the output tensor has axis retained or not.
  100. Default: False
  101. :return: output tensor.
  102. Examples:
  103. .. testcode::
  104. import numpy as np
  105. from megengine import tensor
  106. import megengine.functional as F
  107. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  108. out = F.sum(x)
  109. print(out.numpy())
  110. Outputs:
  111. .. testoutput::
  112. 21
  113. """
  114. return inp.sum(axis=axis, keepdims=keepdims)
  115. def prod(
  116. inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None, keepdims=False
  117. ) -> Tensor:
  118. r"""
  119. Returns the product of input tensor along given axis. If axis is a list of dimensions,
  120. reduce over all of them.
  121. :param inp: input tensor.
  122. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  123. :param keepdims: whether the output tensor has axis retained or not. Default: False
  124. :return: output tensor.
  125. Examples:
  126. .. testcode::
  127. import numpy as np
  128. from megengine import tensor
  129. import megengine.functional as F
  130. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  131. out = F.prod(x)
  132. print(out.numpy())
  133. Outputs:
  134. .. testoutput::
  135. 720
  136. """
  137. return inp.prod(axis=axis, keepdims=keepdims)
  138. def mean(
  139. inp: Tensor,
  140. axis: Optional[Union[int, Sequence[int]]] = None,
  141. keepdims: bool = False,
  142. ) -> Tensor:
  143. """
  144. Returns the mean value of input tensor along
  145. given axis. If axis is a list of dimensions,
  146. reduce over all of them.
  147. :param inp: input tensor.
  148. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  149. :param keepdims: whether the output tensor has axis retained or not. Default: False
  150. :return: output tensor.
  151. Examples:
  152. .. testcode::
  153. import numpy as np
  154. from megengine import tensor
  155. import megengine.functional as F
  156. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  157. out = F.mean(x)
  158. print(out.numpy())
  159. Outputs:
  160. .. testoutput::
  161. 3.5
  162. """
  163. return inp.mean(axis=axis, keepdims=keepdims)
  164. def var(
  165. inp: Tensor,
  166. axis: Optional[Union[int, Sequence[int]]] = None,
  167. keepdims: bool = False,
  168. ) -> Tensor:
  169. """
  170. Returns the variance value of input tensor along
  171. given axis. If axis is a list of dimensions,
  172. reduce over all of them.
  173. :param inp: input tensor.
  174. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  175. :param keepdims: whether the output tensor has axis retained or not. Default: False
  176. :return: output tensor.
  177. Examples:
  178. .. testcode::
  179. import numpy as np
  180. from megengine import tensor
  181. import megengine.functional as F
  182. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  183. out = F.var(data)
  184. print(out.numpy().round(decimals=4))
  185. Outputs:
  186. .. testoutput::
  187. 2.9167
  188. """
  189. if axis is None:
  190. m = mean(inp, axis=axis, keepdims=False)
  191. else:
  192. m = mean(inp, axis=axis, keepdims=True)
  193. v = inp - m
  194. return mean(v ** 2, axis=axis, keepdims=keepdims)
  195. def std(
  196. inp: Tensor,
  197. axis: Optional[Union[int, Sequence[int]]] = None,
  198. keepdims: bool = False,
  199. ) -> Tensor:
  200. """
  201. Returns the standard deviation of input tensor along
  202. given axis. If axis is a list of dimensions,
  203. reduce over all of them.
  204. :param inp: input tensor.
  205. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  206. :param keepdims: whether the output tensor has axis retained or not. Default: False
  207. :return: output tensor.
  208. Examples:
  209. .. testcode::
  210. import numpy as np
  211. from megengine import tensor
  212. import megengine.functional as F
  213. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  214. out = F.std(data, axis=1)
  215. print(out.numpy().round(decimals=4))
  216. Outputs:
  217. .. testoutput::
  218. [0.8165 0.8165]
  219. """
  220. return var(inp, axis=axis, keepdims=keepdims) ** 0.5
  221. def min(
  222. inp: Tensor,
  223. axis: Optional[Union[int, Sequence[int]]] = None,
  224. keepdims: bool = False,
  225. ) -> Tensor:
  226. r"""
  227. Returns the min value of input tensor along
  228. given axis. If axis is a list of dimensions,
  229. reduce over all of them.
  230. :param inp: input tensor.
  231. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  232. :param keepdims: whether the output tensor has axis retained or not. Default: False
  233. :return: output tensor.
  234. Examples:
  235. .. testcode::
  236. import numpy as np
  237. from megengine import tensor
  238. import megengine.functional as F
  239. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  240. out = F.min(x)
  241. print(out.numpy())
  242. Outputs:
  243. .. testoutput::
  244. 1
  245. """
  246. return inp.min(axis=axis, keepdims=keepdims)
  247. def max(
  248. inp: Tensor,
  249. axis: Optional[Union[int, Sequence[int]]] = None,
  250. keepdims: bool = False,
  251. ) -> Tensor:
  252. r"""
  253. Returns the max value of the input tensor along
  254. given axis. If axis is a list of dimensions,
  255. reduce over all of them.
  256. :param inp: input tensor.
  257. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  258. :param keepdims: whether the output tensor has axis retained or not. Default: False
  259. :return: output tensor.
  260. Examples:
  261. .. testcode::
  262. import numpy as np
  263. from megengine import tensor
  264. import megengine.functional as F
  265. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  266. out = F.max(x)
  267. print(out.numpy())
  268. Outputs:
  269. .. testoutput::
  270. 6
  271. """
  272. return inp.max(axis=axis, keepdims=keepdims)
  273. def norm(
  274. inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
  275. ):
  276. """
  277. Calculates ``p``-norm of input tensor along
  278. given axis.
  279. :param inp: input tensor.
  280. :param ord: power of value applied to inp. Default: 2
  281. :param axis: dimension to reduce. If None, input must be a vector. Default: None
  282. :param keepdims: whether the output tensor has axis retained or not. Default: False
  283. :return: output tensor.
  284. Examples:
  285. .. testcode::
  286. import numpy as np
  287. from megengine import tensor
  288. import megengine.functional as F
  289. x = tensor(np.arange(-3, 3, dtype=np.float32))
  290. out = F.norm(x)
  291. print(out.numpy().round(decimals=4))
  292. Outputs:
  293. .. testoutput::
  294. 4.3589
  295. """
  296. if axis is None:
  297. if inp.ndim != 1:
  298. raise TypeError("axis is required unless input is a vector")
  299. if ord is None:
  300. ord = 2
  301. if ord == 0:
  302. return sum(inp != 0, axis=axis, keepdims=keepdims)
  303. if ord == math.inf:
  304. return max(abs(inp))
  305. if ord == -math.inf:
  306. return min(abs(inp))
  307. return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
  308. def argmin(
  309. inp: Tensor,
  310. axis: Optional[Union[int, Sequence[int]]] = None,
  311. keepdims: bool = False,
  312. ) -> Tensor:
  313. r"""
  314. Returns the indices of the minimum values along
  315. given axis. If axis is a list of dimensions,
  316. reduce over all of them.
  317. :param inp: input tensor.
  318. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  319. :param keepdims: whether the output tensor has axis retained or not. Default: False
  320. :return: output tensor.
  321. Examples:
  322. .. testcode::
  323. import numpy as np
  324. from megengine import tensor
  325. import megengine.functional as F
  326. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  327. out = F.argmin(x)
  328. print(out.numpy())
  329. Outputs:
  330. .. testoutput::
  331. 0
  332. """
  333. if isinstance(axis, collections.abc.Iterable):
  334. axis = list(axis)
  335. axis.sort(reverse=True)
  336. for ai in axis:
  337. op = builtin.Argmin(axis=ai)
  338. (inp,) = apply(op, inp)
  339. if not keepdims:
  340. inp = squeeze(inp, ai)
  341. return inp
  342. if axis is None:
  343. assert not keepdims, "can not set axis=None and keepdims=True"
  344. inp = inp.flatten()
  345. axis = 0
  346. op = builtin.Argmin(axis=axis)
  347. (result,) = apply(op, inp)
  348. if not keepdims:
  349. result = squeeze(result, axis)
  350. return result
  351. def argmax(
  352. inp: Tensor,
  353. axis: Optional[Union[int, Sequence[int]]] = None,
  354. keepdims: bool = False,
  355. ) -> Tensor:
  356. r"""
  357. Returns the indices of the maximum values along
  358. given axis. If axis is a list of dimensions,
  359. reduce over all of them.
  360. :param inp: input tensor.
  361. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  362. :param keepdims: whether the output tensor has axis retained or not. Default: False
  363. :return: output tensor.
  364. Examples:
  365. .. testcode::
  366. import numpy as np
  367. from megengine import tensor
  368. import megengine.functional as F
  369. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  370. out = F.argmax(x)
  371. print(out.numpy())
  372. Outputs:
  373. .. testoutput::
  374. 5
  375. """
  376. if isinstance(axis, collections.abc.Iterable):
  377. axis = list(axis)
  378. axis.sort(reverse=True)
  379. for ai in axis:
  380. op = builtin.Argmax(axis=ai)
  381. (inp,) = apply(op, inp)
  382. if not keepdims:
  383. inp = squeeze(inp, ai)
  384. return inp
  385. if axis is None:
  386. assert not keepdims, "can not set axis=None and keepdims=True"
  387. inp = inp.flatten()
  388. axis = 0
  389. op = builtin.Argmax(axis=axis)
  390. (result,) = apply(op, inp)
  391. if not keepdims:
  392. result = squeeze(result, axis)
  393. return result
  394. def normalize(
  395. inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
  396. ) -> Tensor:
  397. r"""
  398. Performs :math:`L_p` normalization of input tensor along
  399. given axis.
  400. For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
  401. :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
  402. .. math::
  403. v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
  404. :param inp: input tensor.
  405. :param ord: power of value applied to input tensor. Default: 2
  406. :param axis: dimension to reduce.If None, input must be a vector. Default: None
  407. :param eps: a small value to avoid division by zero. Default: 1e-12
  408. :return: normalized output tensor.
  409. """
  410. if axis is None:
  411. return inp / clip(norm(inp, ord, axis), lower=eps)
  412. else:
  413. return inp / clip(norm(inp, ord, axis, keepdims=True), lower=eps)
  414. def argsort(inp: Tensor, descending: bool = False) -> Tensor:
  415. r"""
  416. Returns the indices that would sort the input tensor.
  417. :param inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
  418. :param descending: sort in descending order, where the largest comes first. Default: False
  419. :return: indices of int32 indicates how to sort the input.
  420. Examples:
  421. .. testcode::
  422. import numpy as np
  423. from megengine import tensor
  424. import megengine.functional as F
  425. x = tensor(np.array([1,2], dtype=np.float32))
  426. indices = F.argsort(x)
  427. print(indices.numpy())
  428. Outputs:
  429. .. testoutput::
  430. [0 1]
  431. """
  432. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  433. if descending:
  434. order = "DESCENDING"
  435. else:
  436. order = "ASCENDING"
  437. op = builtin.Argsort(order=order)
  438. if len(inp.shape) == 1:
  439. inp = inp.reshape(1, -1)
  440. _, result = apply(op, inp)
  441. return result[0]
  442. _, result = apply(op, inp)
  443. return result
  444. def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
  445. r"""
  446. Returns sorted tensor and the indices would sort the input tensor.
  447. :param inp: input tensor. If it's 2d, the result would be sorted by row.
  448. :param descending: sort in descending order, where the largest comes first. Default: False
  449. :return: tuple of two tensors `(sorted_tensor, indices_of_int32)`.
  450. Examples:
  451. .. testcode::
  452. import numpy as np
  453. from megengine import tensor
  454. import megengine.functional as F
  455. x = tensor(np.array([1,2], dtype=np.float32))
  456. out, indices = F.sort(x)
  457. print(out.numpy())
  458. Outputs:
  459. .. testoutput::
  460. [1. 2.]
  461. """
  462. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  463. if descending:
  464. order = "DESCENDING"
  465. else:
  466. order = "ASCENDING"
  467. op = builtin.Argsort(order=order)
  468. if len(inp.shape) == 1:
  469. inp = inp.reshape(1, -1)
  470. tns, ind = apply(op, inp)
  471. return tns[0], ind[0]
  472. tns, ind = apply(op, inp)
  473. return tns, ind
  474. def topk(
  475. inp: Tensor,
  476. k: int,
  477. descending: bool = False,
  478. kth_only: bool = False,
  479. no_sort: bool = False,
  480. ) -> Tuple[Tensor, Tensor]:
  481. r"""
  482. Selects the ``Top-K``(by default) smallest elements of 2d matrix by row.
  483. :param inp: input tensor. If input tensor is 2d, each row will be sorted.
  484. :param k: number of elements needed.
  485. :param descending: if True, return the largest elements instead. Default: False
  486. :param kth_only: if True, only the k-th element will be returned. Default: False
  487. :param no_sort: if True, the returned elements can be unordered. Default: False
  488. :return: tuple of two tensors `(topk_tensor, indices_of_int32)`.
  489. Examples:
  490. .. testcode::
  491. import numpy as np
  492. from megengine import tensor
  493. import megengine.functional as F
  494. x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
  495. top, indices = F.topk(x, 5)
  496. print(top.numpy(), indices.numpy())
  497. Outputs:
  498. .. testoutput::
  499. [1. 2. 3. 4. 5.] [7 0 6 1 5]
  500. """
  501. if descending:
  502. inp = -inp
  503. if kth_only:
  504. mode = "KTH_ONLY"
  505. elif no_sort:
  506. mode = "VALUE_IDX_NOSORT"
  507. else:
  508. mode = "VALUE_IDX_SORTED"
  509. op = builtin.TopK(mode=mode)
  510. if not isinstance(k, Tensor):
  511. (k,) = Const(k, dtype="int32", device=inp.device)()
  512. if len(inp.shape) == 1:
  513. inp = inp.reshape(1, -1)
  514. res = apply(op, inp, k)
  515. if kth_only:
  516. tns = res[0]
  517. else:
  518. tns, ind = res[0][0], res[1][0]
  519. else:
  520. res = apply(op, inp, k)
  521. if kth_only:
  522. tns = res
  523. else:
  524. tns, ind = res[0], res[1]
  525. if descending:
  526. tns = -tns
  527. return tns, ind

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台