You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

math.py 18 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import collections
  10. import functools
  11. import math
  12. import numbers
  13. from typing import Optional, Sequence, Tuple, Union
  14. from ..core.ops import builtin
  15. from ..core.ops._internal import param_defs as P
  16. from ..core.ops.special import Const
  17. from ..core.tensor import utils
  18. from ..core.tensor.core import TensorBase, TensorWrapperBase, apply
  19. from ..tensor import Tensor
  20. from .elemwise import clip, exp, log, log1p
  21. from .tensor import reshape, squeeze
  22. __all__ = [
  23. "argmax",
  24. "argmin",
  25. "argsort",
  26. "isinf",
  27. "isnan",
  28. "max",
  29. "mean",
  30. "min",
  31. "norm",
  32. "normalize",
  33. "prod",
  34. "sign",
  35. "sort",
  36. "std",
  37. "sum",
  38. "topk",
  39. "var",
  40. ]
  41. def isnan(inp: Tensor) -> Tensor:
  42. r"""Returns a new tensor representing if each element is ``NaN`` or not.
  43. :param inp: input tensor.
  44. :return: result tensor.
  45. Examples:
  46. .. testcode::
  47. from megengine import tensor
  48. import megengine.functional as F
  49. x = tensor([1, float("nan"), 0])
  50. print(F.isnan(x).numpy())
  51. Outputs:
  52. .. testoutput::
  53. [False True False]
  54. """
  55. return inp != inp
  56. def isinf(inp: Tensor) -> Tensor:
  57. r"""Returns a new tensor representing if each element is ``Inf`` or not.
  58. :param inp: input tensor.
  59. :return: result tensor.
  60. Examples:
  61. .. testcode::
  62. from megengine import tensor
  63. import megengine.functional as F
  64. x = tensor([1, float("inf"), 0])
  65. print(F.isinf(x).numpy())
  66. Outputs:
  67. .. testoutput::
  68. [False True False]
  69. """
  70. return abs(inp).astype("float32") == float("inf")
  71. def sign(inp: Tensor):
  72. r"""Returns a new tensor representing the sign of each element in input tensor.
  73. :param: input tensor.
  74. :return: the sign of input tensor.
  75. Examples:
  76. .. testcode::
  77. from megengine import tensor
  78. import megengine.functional as F
  79. x = tensor([1, -1, 0])
  80. print(F.sign(x).numpy())
  81. Outputs:
  82. .. testoutput::
  83. [ 1 -1 0]
  84. """
  85. return (inp > 0).astype(inp.dtype) - (inp < 0).astype(inp.dtype)
  86. def sum(
  87. inp: Tensor,
  88. axis: Optional[Union[int, Sequence[int]]] = None,
  89. keepdims: bool = False,
  90. ) -> Tensor:
  91. r"""Returns the sum of input tensor along given axis. If axis is a list of dimensions,
  92. reduce over all of them.
  93. :param inp: input tensor.
  94. :param axis: dimension to reduce. If None, all dimensions will be reduced.
  95. Default: None
  96. :param keepdims: whether the output tensor has axis retained or not.
  97. Default: False
  98. :return: output tensor.
  99. Examples:
  100. .. testcode::
  101. import numpy as np
  102. from megengine import tensor
  103. import megengine.functional as F
  104. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  105. out = F.sum(x)
  106. print(out.numpy())
  107. Outputs:
  108. .. testoutput::
  109. [21]
  110. """
  111. return inp.sum(axis=axis, keepdims=keepdims)
  112. def prod(
  113. inp: Tensor, axis: Optional[Union[int, Sequence[int]]] = None, keepdims=False
  114. ) -> Tensor:
  115. r"""Returns the product of input tensor along given axis. If axis is a list of dimensions,
  116. reduce over all of them.
  117. :param inp: input tensor.
  118. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  119. :param keepdims: whether the output tensor has axis retained or not. Default: False
  120. :return: output tensor.
  121. Examples:
  122. .. testcode::
  123. import numpy as np
  124. from megengine import tensor
  125. import megengine.functional as F
  126. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  127. out = F.prod(x)
  128. print(out.numpy())
  129. Outputs:
  130. .. testoutput::
  131. [720]
  132. """
  133. return inp.prod(axis=axis, keepdims=keepdims)
  134. def mean(
  135. inp: Tensor,
  136. axis: Optional[Union[int, Sequence[int]]] = None,
  137. keepdims: bool = False,
  138. ) -> Tensor:
  139. """Returns the mean value of input tensor along
  140. given axis. If axis is a list of dimensions,
  141. reduce over all of them.
  142. :param inp: input tensor.
  143. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  144. :param keepdims: whether the output tensor has axis retained or not. Default: False
  145. :return: output tensor.
  146. Examples:
  147. .. testcode::
  148. import numpy as np
  149. from megengine import tensor
  150. import megengine.functional as F
  151. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2, 3))
  152. out = F.mean(x)
  153. print(out.numpy())
  154. Outputs:
  155. .. testoutput::
  156. [3.5]
  157. """
  158. return inp.astype("float32").mean(axis=axis, keepdims=keepdims)
  159. def var(
  160. inp: Tensor,
  161. axis: Optional[Union[int, Sequence[int]]] = None,
  162. keepdims: bool = False,
  163. ) -> Tensor:
  164. """Returns the variance value of input tensor along
  165. given axis. If axis is a list of dimensions,
  166. reduce over all of them.
  167. :param inp: input tensor.
  168. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  169. :param keepdims: whether the output tensor has axis retained or not. Default: False
  170. :return: output tensor.
  171. Examples:
  172. .. testcode::
  173. import numpy as np
  174. from megengine import tensor
  175. import megengine.functional as F
  176. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  177. out = F.var(data)
  178. print(out.numpy())
  179. Outputs:
  180. .. testoutput::
  181. [2.9167]
  182. """
  183. if axis is None:
  184. m = mean(inp, axis=axis, keepdims=False)
  185. else:
  186. m = mean(inp, axis=axis, keepdims=True)
  187. v = inp - m
  188. return mean(v ** 2, axis=axis, keepdims=keepdims)
  189. def std(
  190. inp: Tensor,
  191. axis: Optional[Union[int, Sequence[int]]] = None,
  192. keepdims: bool = False,
  193. ) -> Tensor:
  194. """Returns the standard deviation of input tensor along
  195. given axis. If axis is a list of dimensions,
  196. reduce over all of them.
  197. :param inp: input tensor.
  198. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  199. :param keepdims: whether the output tensor has axis retained or not. Default: False
  200. :return: output tensor.
  201. Examples:
  202. .. testcode::
  203. import numpy as np
  204. from megengine import tensor
  205. import megengine.functional as F
  206. data = tensor(np.arange(1, 7, dtype=np.float32).reshape(2, 3))
  207. out = F.std(data, axis=1)
  208. print(out.numpy())
  209. Outputs:
  210. .. testoutput::
  211. [0.8165 0.8165]
  212. """
  213. return var(inp, axis=axis, keepdims=keepdims) ** 0.5
  214. def min(
  215. inp: Tensor,
  216. axis: Optional[Union[int, Sequence[int]]] = None,
  217. keepdims: bool = False,
  218. ) -> Tensor:
  219. r"""Returns the min value of input tensor along
  220. given axis. If axis is a list of dimensions,
  221. reduce over all of them.
  222. :param inp: input tensor.
  223. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  224. :param keepdims: whether the output tensor has axis retained or not. Default: False
  225. :return: output tensor.
  226. Examples:
  227. .. testcode::
  228. import numpy as np
  229. from megengine import tensor
  230. import megengine.functional as F
  231. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  232. out = F.min(x)
  233. print(out.numpy())
  234. Outputs:
  235. .. testoutput::
  236. [1]
  237. """
  238. return inp.min(axis=axis, keepdims=keepdims)
  239. def max(
  240. inp: Tensor,
  241. axis: Optional[Union[int, Sequence[int]]] = None,
  242. keepdims: bool = False,
  243. ) -> Tensor:
  244. r"""Returns the max value of the input tensor along
  245. given axis. If axis is a list of dimensions,
  246. reduce over all of them.
  247. :param inp: input tensor.
  248. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  249. :param keepdims: whether the output tensor has axis retained or not. Default: False
  250. :return: output tensor.
  251. Examples:
  252. .. testcode::
  253. import numpy as np
  254. from megengine import tensor
  255. import megengine.functional as F
  256. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  257. out = F.max(x)
  258. print(out.numpy())
  259. Outputs:
  260. .. testoutput::
  261. [6]
  262. """
  263. return inp.max(axis=axis, keepdims=keepdims)
  264. def norm(
  265. inp: Tensor, ord: float = None, axis: int = None, keepdims=False,
  266. ):
  267. """Calculates ``p``-norm of input tensor along
  268. given axis.
  269. :param inp: input tensor.
  270. :param ord: power of value applied to inp. Default: 2
  271. :param axis: dimension to reduce. If None, input must be a vector. Default: None
  272. :param keepdims: whether the output tensor has axis retained or not. Default: False
  273. :return: output tensor.
  274. Examples:
  275. .. testcode::
  276. import numpy as np
  277. from megengine import tensor
  278. import megengine.functional as F
  279. x = tensor(np.arange(-3, 3, dtype=np.float32))
  280. out = F.norm(x)
  281. print(out.numpy())
  282. Outputs:
  283. .. testoutput::
  284. [4.3589]
  285. """
  286. if axis is None:
  287. if inp.ndim != 1:
  288. raise TypeError("axis is required unless input is a vector")
  289. if ord is None:
  290. ord = 2
  291. if ord == 0:
  292. return sum(inp != 0, axis=axis, keepdims=keepdims)
  293. if ord == math.inf:
  294. return max(abs(inp))
  295. if ord == -math.inf:
  296. return min(abs(inp))
  297. return sum(abs(inp) ** ord, axis=axis, keepdims=keepdims) ** (1.0 / ord)
  298. def argmin(
  299. inp: Tensor,
  300. axis: Optional[Union[int, Sequence[int]]] = None,
  301. keepdims: bool = False,
  302. ) -> Tensor:
  303. r"""Returns the indices of the minimum values along
  304. given axis. If axis is a list of dimensions,
  305. reduce over all of them.
  306. :param inp: input tensor.
  307. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  308. :param keepdims: whether the output tensor has axis retained or not. Default: False
  309. :return: output tensor.
  310. Examples:
  311. .. testcode::
  312. import numpy as np
  313. from megengine import tensor
  314. import megengine.functional as F
  315. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  316. out = F.argmin(x)
  317. print(out.numpy())
  318. Outputs:
  319. .. testoutput::
  320. [0]
  321. """
  322. if isinstance(axis, collections.abc.Iterable):
  323. axis = list(axis)
  324. axis.sort(reverse=True)
  325. for ai in axis:
  326. op = builtin.Argmin(axis=ai)
  327. (inp,) = apply(op, inp)
  328. if not keepdims:
  329. inp = squeeze(inp, ai)
  330. return inp
  331. if axis is None:
  332. assert not keepdims, "can not set axis=None and keepdims=True"
  333. inp = inp.flatten()
  334. axis = 0
  335. op = builtin.Argmin(axis=axis)
  336. (result,) = apply(op, inp)
  337. if not keepdims:
  338. result = squeeze(result, axis)
  339. return result
  340. def argmax(
  341. inp: Tensor,
  342. axis: Optional[Union[int, Sequence[int]]] = None,
  343. keepdims: bool = False,
  344. ) -> Tensor:
  345. r"""Returns the indices of the maximum values along
  346. given axis. If axis is a list of dimensions,
  347. reduce over all of them.
  348. :param inp: input tensor.
  349. :param axis: dimension to reduce. If None, all dimensions will be reduced. Default: None
  350. :param keepdims: whether the output tensor has axis retained or not. Default: False
  351. :return: output tensor.
  352. Examples:
  353. .. testcode::
  354. import numpy as np
  355. from megengine import tensor
  356. import megengine.functional as F
  357. x = tensor(np.arange(1, 7, dtype=np.int32).reshape(2,3))
  358. out = F.argmax(x)
  359. print(out.numpy())
  360. Outputs:
  361. .. testoutput::
  362. [5]
  363. """
  364. if isinstance(axis, collections.abc.Iterable):
  365. axis = list(axis)
  366. axis.sort(reverse=True)
  367. for ai in axis:
  368. op = builtin.Argmax(axis=ai)
  369. (inp,) = apply(op, inp)
  370. if not keepdims:
  371. inp = squeeze(inp, ai)
  372. return inp
  373. if axis is None:
  374. assert not keepdims, "can not set axis=None and keepdims=True"
  375. inp = inp.flatten()
  376. axis = 0
  377. op = builtin.Argmax(axis=axis)
  378. (result,) = apply(op, inp)
  379. if not keepdims:
  380. result = squeeze(result, axis)
  381. return result
  382. def normalize(
  383. inp: Tensor, ord: float = None, axis: int = None, eps: float = 1e-12,
  384. ) -> Tensor:
  385. r"""Performs :math:`L_p` normalization of input tensor along
  386. given axis.
  387. For a tensor of shape :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
  388. :math:`n_{dim}` -element vector :math:`v` along dimension :attr:`axis` is transformed as:
  389. .. math::
  390. v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.
  391. :param inp: input tensor.
  392. :param ord: power of value applied to input tensor. Default: 2
  393. :param axis: dimension to reduce.If None, input must be a vector. Default: None
  394. :param eps: a small value to avoid division by zero. Default: 1e-12
  395. :return: normalized output tensor.
  396. """
  397. if axis is None:
  398. return inp / clip(norm(inp, ord, axis), lower=eps)
  399. else:
  400. return inp / clip(norm(inp, ord, axis, keepdims=True), lower=eps)
  401. def argsort(inp: Tensor, descending: bool = False) -> Tensor:
  402. r"""Returns the indices that would sort the input tensor.
  403. :param inp: input tensor. If it's 2d, the result would be array of indices show how to sort each row in the input tensor.
  404. :param descending: sort in descending order, where the largest comes first. Default: False
  405. :return: indices of int32 indicates how to sort the input.
  406. Examples:
  407. .. testcode::
  408. import numpy as np
  409. from megengine import tensor
  410. import megengine.functional as F
  411. x = tensor(np.array([1,2], dtype=np.float32))
  412. indices = F.argsort(x)
  413. print(indices.numpy())
  414. Outputs:
  415. .. testoutput::
  416. [0 1]
  417. """
  418. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  419. if descending:
  420. order = P.Argsort.Order.DESCENDING
  421. else:
  422. order = P.Argsort.Order.ASCENDING
  423. op = builtin.Argsort(order=order)
  424. if len(inp.shape) == 1:
  425. inp = inp.reshape(1, -1)
  426. _, result = apply(op, inp)
  427. return result[0]
  428. _, result = apply(op, inp)
  429. return result
  430. def sort(inp: Tensor, descending: bool = False) -> Tuple[Tensor, Tensor]:
  431. r"""Returns sorted tensor and the indices would sort the input tensor.
  432. :param inp: input tensor. If it's 2d, the result would be sorted by row.
  433. :param descending: sort in descending order, where the largest comes first. Default: False
  434. :return: tuple of two tensors `(sorted_tensor, indices_of_int32)`.
  435. Examples:
  436. .. testcode::
  437. import numpy as np
  438. from megengine import tensor
  439. import megengine.functional as F
  440. x = tensor(np.array([1,2], dtype=np.float32))
  441. out, indices = F.sort(x)
  442. print(out.numpy())
  443. Outputs:
  444. .. testoutput::
  445. [1. 2.]
  446. """
  447. assert len(inp.shape) <= 2, "Input should be 1d or 2d"
  448. if descending:
  449. order = P.Argsort.Order.DESCENDING
  450. else:
  451. order = P.Argsort.Order.ASCENDING
  452. op = builtin.Argsort(order=order)
  453. if len(inp.shape) == 1:
  454. inp = inp.reshape(1, -1)
  455. tns, ind = apply(op, inp)
  456. return tns[0], ind[0]
  457. tns, ind = apply(op, inp)
  458. return tns, ind
  459. def topk(
  460. inp: Tensor,
  461. k: int,
  462. descending: bool = False,
  463. kth_only: bool = False,
  464. no_sort: bool = False,
  465. ) -> Tuple[Tensor, Tensor]:
  466. r"""Selects the ``Top-K``(by default) smallest elements of 2d matrix by row.
  467. :param inp: input tensor. If input tensor is 2d, each row will be sorted.
  468. :param k: number of elements needed.
  469. :param descending: if True, return the largest elements instead. Default: False
  470. :param kth_only: if True, only the k-th element will be returned. Default: False
  471. :param no_sort: if True, the returned elements can be unordered. Default: False
  472. :return: tuple of two tensors `(topk_tensor, indices_of_int32)`.
  473. Examples:
  474. .. testcode::
  475. import numpy as np
  476. from megengine import tensor
  477. import megengine.functional as F
  478. x = tensor(np.array([2, 4, 6, 8, 7, 5, 3, 1], dtype=np.float32))
  479. top, indices = F.topk(x, 5)
  480. print(top.numpy(), indices.numpy())
  481. Outputs:
  482. .. testoutput::
  483. [1. 2. 3. 4. 5.] [7 0 6 1 5]
  484. """
  485. if descending:
  486. inp = -inp
  487. Mode = P.TopK.Mode
  488. if kth_only:
  489. mode = Mode.KTH_ONLY
  490. elif no_sort:
  491. mode = Mode.VALUE_IDX_NOSORT
  492. else:
  493. mode = Mode.VALUE_IDX_SORTED
  494. op = builtin.TopK(mode=mode)
  495. if not isinstance(k, (TensorBase, TensorWrapperBase)):
  496. (k,) = Const(k, dtype="int32", device=inp.device)(inp)
  497. if len(inp.shape) == 1:
  498. inp = inp.reshape(1, -1)
  499. res = apply(op, inp, k)
  500. if kth_only:
  501. tns = res[0]
  502. else:
  503. tns, ind = res[0][0], res[1][0]
  504. else:
  505. res = apply(op, inp, k)
  506. if kth_only:
  507. tns = res
  508. else:
  509. tns, ind = res[0], res[1]
  510. if descending:
  511. tns = -tns
  512. return tns, ind

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台