You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

test_math.py 9.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. from functools import partial
  10. import numpy as np
  11. import megengine.functional as F
  12. from megengine import tensor
  13. from megengine.test import assertTensorClose
  14. def _default_compare_fn(x, y):
  15. assertTensorClose(x.numpy(), y)
  16. def opr_test(cases, func, compare_fn=_default_compare_fn, ref_fn=None, **kwargs):
  17. """
  18. func: the function to run opr.
  19. compare_fn: the function to compare the result and expected, use assertTensorClose if None.
  20. ref_fn: the function to generate expected data, should assign output if None.
  21. cases: the list which have dict element, the list length should be 2 for dynamic shape test.
  22. and the dict should have input,
  23. and should have output if ref_fn is None.
  24. should use list for multiple inputs and outputs for each case.
  25. kwargs: The additional kwargs for opr func.
  26. simple examples:
  27. dtype = np.float32
  28. cases = [{"input": [10, 20]}, {"input": [20, 30]}]
  29. opr_test(cases,
  30. F.eye,
  31. ref_fn=lambda n, m: np.eye(n, m).astype(dtype),
  32. dtype=dtype)
  33. """
  34. def check_results(results, expected):
  35. if not isinstance(results, tuple):
  36. results = (results,)
  37. for r, e in zip(results, expected):
  38. compare_fn(r, e)
  39. def get_param(cases, idx):
  40. case = cases[idx]
  41. inp = case.get("input", None)
  42. outp = case.get("output", None)
  43. if inp is None:
  44. raise ValueError("the test case should have input")
  45. if not isinstance(inp, list):
  46. inp = (inp,)
  47. else:
  48. inp = tuple(inp)
  49. if ref_fn is not None and callable(ref_fn):
  50. outp = ref_fn(*inp)
  51. if outp is None:
  52. raise ValueError("the test case should have output or reference function")
  53. if not isinstance(outp, list):
  54. outp = (outp,)
  55. else:
  56. outp = tuple(outp)
  57. return inp, outp
  58. if len(cases) == 0:
  59. raise ValueError("should give one case at least")
  60. if not callable(func):
  61. raise ValueError("the input func should be callable")
  62. inp, outp = get_param(cases, 0)
  63. inp_tensor = [tensor(inpi) for inpi in inp]
  64. results = func(*inp_tensor, **kwargs)
  65. check_results(results, outp)
  66. def common_test_reduce(opr, ref_opr):
  67. data1_shape = (5, 6, 7)
  68. data2_shape = (2, 9, 12)
  69. data1 = np.random.random(data1_shape).astype(np.float32)
  70. data2 = np.random.random(data2_shape).astype(np.float32)
  71. cases = [{"input": data1}, {"input": data2}]
  72. if opr not in (F.argmin, F.argmax):
  73. # test default axis
  74. opr_test(cases, opr, ref_fn=ref_opr)
  75. # test all axises in range of input shape
  76. for axis in range(-3, 3):
  77. # test keepdims False
  78. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
  79. # test keepdims True
  80. opr_test(
  81. cases,
  82. opr,
  83. ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
  84. axis=axis,
  85. keepdims=True,
  86. )
  87. else:
  88. # test defaut axis
  89. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
  90. # test all axises in range of input shape
  91. for axis in range(0, 3):
  92. opr_test(
  93. cases,
  94. opr,
  95. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  96. axis=axis,
  97. )
  98. def test_sum():
  99. common_test_reduce(opr=F.sum, ref_opr=np.sum)
  100. def test_prod():
  101. common_test_reduce(opr=F.prod, ref_opr=np.prod)
  102. def test_mean():
  103. common_test_reduce(opr=F.mean, ref_opr=np.mean)
  104. def test_var():
  105. common_test_reduce(opr=F.var, ref_opr=np.var)
  106. def test_std():
  107. common_test_reduce(opr=F.std, ref_opr=np.std)
  108. def test_min():
  109. common_test_reduce(opr=F.min, ref_opr=np.min)
  110. def test_max():
  111. common_test_reduce(opr=F.max, ref_opr=np.max)
  112. def test_argmin():
  113. common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
  114. def test_argmax():
  115. common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
  116. def test_sqrt():
  117. d1_shape = (15,)
  118. d2_shape = (25,)
  119. d1 = np.random.random(d1_shape).astype(np.float32)
  120. d2 = np.random.random(d2_shape).astype(np.float32)
  121. cases = [{"input": d1}, {"input": d2}]
  122. opr_test(cases, F.sqrt, ref_fn=np.sqrt)
  123. def test_sort():
  124. data1_shape = (10, 3)
  125. data2_shape = (12, 2)
  126. data1 = np.random.random(data1_shape).astype(np.float32)
  127. data2 = np.random.random(data2_shape).astype(np.float32)
  128. output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
  129. output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
  130. cases = [
  131. {"input": data1, "output": output0},
  132. {"input": data2, "output": output1},
  133. ]
  134. opr_test(cases, F.sort)
  135. def test_normalize():
  136. cases = [
  137. {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
  138. ]
  139. def np_normalize(x, p=2, axis=None, eps=1e-12):
  140. if axis is None:
  141. norm = np.sum(x ** p) ** (1.0 / p)
  142. else:
  143. norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
  144. return x / np.clip(norm, a_min=eps, a_max=np.inf)
  145. # Test L-2 norm along all dimensions
  146. opr_test(cases, F.normalize, ref_fn=np_normalize)
  147. # Test L-1 norm along all dimensions
  148. opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
  149. # Test L-2 norm along the second dimension
  150. opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
  151. # Test some norm == 0
  152. cases[0]["input"][0, 0, 0, :] = 0
  153. cases[1]["input"][0, 0, 0, :] = 0
  154. opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
  155. def test_matmul():
  156. shape1 = 3
  157. shape2 = 3
  158. shape3 = (3, 5)
  159. shape4 = (5, 6)
  160. data1 = np.random.random(shape1).astype("float32")
  161. data2 = np.random.random(shape2).astype("float32")
  162. data3 = np.random.random(shape3).astype("float32")
  163. data4 = np.random.random(shape4).astype("float32")
  164. cases = [
  165. {"input": [data1, data2]},
  166. {"input": [data2, data3]},
  167. {"input": [data3, data4]},
  168. ]
  169. opr_test(cases, F.matmul, ref_fn=np.matmul)
  170. batch_size = 10
  171. shape1 = (batch_size, 2, 3)
  172. shape2 = (batch_size, 3, 4)
  173. shape3 = (batch_size, 10, 4, 5)
  174. data1 = np.random.random(shape1).astype("float32")
  175. data2 = np.random.random(shape2).astype("float32")
  176. data3 = np.random.random(shape3).astype("float32")
  177. cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
  178. for i in range(0, batch_size):
  179. def compare_fn(x, y):
  180. x.numpy()[i, ...] == y
  181. opr_test(
  182. cases,
  183. F.matmul,
  184. compare_fn=compare_fn,
  185. ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
  186. )
  187. # def test_logsumexp():
  188. # x = np.arange(10).astype(np.float32)
  189. # expected = np.log(np.sum(np.exp(x)))
  190. # cases = [{"input": x, "output": expected}]
  191. # compare_fn = partial(assertTensorClose, allow_special_values=True)
  192. # # large value check
  193. # n = 100
  194. # x = np.full(n, 10000, dtype=np.float32)
  195. # expected = 10000 + np.log(n)
  196. # cases.append({"input": x, "output": expected.astype(np.float32)})
  197. # opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  198. # # special value check
  199. # x = np.array([np.inf], dtype=np.float32)
  200. # expected = x
  201. # cases = [{"input": x, "output": expected}]
  202. # x = np.array([-np.inf, 0.0], dtype=np.float32)
  203. # expected = np.zeros(1).astype(np.float32)
  204. # cases.append({"input": x, "output": expected})
  205. # opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  206. # x = np.array([np.nan], dtype=np.float32)
  207. # expected = x
  208. # cases = [{"input": x, "output": expected}]
  209. # x = np.array([-np.inf, 1], dtype=np.float32)
  210. # expected = np.array([1.0], dtype=np.float32)
  211. # cases.append({"input": x, "output": expected})
  212. # opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  213. # # keepdims check
  214. # x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
  215. # expected = np.array([[1e10], [-1e10]], dtype=np.float32)
  216. # cases = [{"input": x, "output": expected}]
  217. # x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
  218. # expected = np.array([[1e10], [np.inf]], dtype=np.float32)
  219. # cases.append({"input": x, "output": expected})
  220. # opr_test(cases, F.logsumexp, axis=1, keepdims=True, compare_fn=compare_fn)
  221. # # multiple axes check
  222. # x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
  223. # expected = np.array([1e10], dtype=np.float32)
  224. # cases = [{"input": x, "output": expected}]
  225. # x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
  226. # expected = np.array([np.inf], dtype=np.float32)
  227. # cases.append({"input": x, "output": expected})
  228. # opr_test(cases, F.logsumexp, axis=(0, 1), keepdims=False, compare_fn=compare_fn)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台