You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_math.py 8.1 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. from functools import partial
  10. import numpy as np
  11. import pytest
  12. from utils import opr_test
  13. import megengine.functional as F
  14. from megengine import jit, tensor
  15. def common_test_reduce(opr, ref_opr):
  16. data1_shape = (5, 6, 7)
  17. data2_shape = (2, 9, 12)
  18. data1 = np.random.random(data1_shape).astype(np.float32)
  19. data2 = np.random.random(data2_shape).astype(np.float32)
  20. cases = [
  21. {"input": data1},
  22. {"input": data2},
  23. {"input": np.array([[[1, 2, np.nan, 4], [8, 6, 5, 2], [2, 3, 4, 5]]])},
  24. ]
  25. if opr not in (F.argmin, F.argmax):
  26. # test default axis
  27. opr_test(cases, opr, ref_fn=ref_opr)
  28. # test all axises in range of input shape
  29. for axis in range(-3, 3):
  30. # test keepdims False
  31. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
  32. # test keepdims True
  33. opr_test(
  34. cases,
  35. opr,
  36. ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
  37. axis=axis,
  38. keepdims=True,
  39. )
  40. else:
  41. # test defaut axis
  42. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
  43. # test all axises in range of input shape
  44. for axis in range(0, 3):
  45. opr_test(
  46. cases,
  47. opr,
  48. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  49. axis=axis,
  50. )
  51. # test negative axis
  52. axis = axis - len(data1_shape)
  53. opr_test(
  54. cases,
  55. opr,
  56. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  57. axis=axis,
  58. )
  59. def test_sum():
  60. common_test_reduce(opr=F.sum, ref_opr=np.sum)
  61. def test_prod():
  62. common_test_reduce(opr=F.prod, ref_opr=np.prod)
  63. def test_mean():
  64. common_test_reduce(opr=F.mean, ref_opr=np.mean)
  65. def test_var():
  66. common_test_reduce(opr=F.var, ref_opr=np.var)
  67. def test_std():
  68. common_test_reduce(opr=F.std, ref_opr=np.std)
  69. def test_min():
  70. common_test_reduce(opr=F.min, ref_opr=np.min)
  71. def test_max():
  72. common_test_reduce(opr=F.max, ref_opr=np.max)
  73. def test_argmin():
  74. common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
  75. def test_argmax():
  76. common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
  77. def test_sqrt():
  78. d1_shape = (15,)
  79. d2_shape = (25,)
  80. d1 = np.random.random(d1_shape).astype(np.float32)
  81. d2 = np.random.random(d2_shape).astype(np.float32)
  82. cases = [{"input": d1}, {"input": d2}]
  83. opr_test(cases, F.sqrt, ref_fn=np.sqrt)
  84. def test_sort():
  85. data1_shape = (10, 3)
  86. data2_shape = (12, 2)
  87. data1 = np.random.random(data1_shape).astype(np.float32)
  88. data2 = np.random.random(data2_shape).astype(np.float32)
  89. output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
  90. output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
  91. cases = [
  92. {"input": data1, "output": output1},
  93. {"input": data2, "output": output2},
  94. ]
  95. opr_test(cases, F.sort)
  96. @pytest.mark.parametrize("is_symbolic", [None, False, True])
  97. def test_sort_empty(is_symbolic):
  98. data_shapes = [
  99. (0,),
  100. (10, 0),
  101. ]
  102. def fn(x):
  103. return F.sort(x)
  104. for shape in data_shapes:
  105. if is_symbolic is not None:
  106. fn_ = jit.trace(symbolic=is_symbolic)(fn)
  107. else:
  108. fn_ = fn
  109. data = np.random.random(shape).astype(np.float32)
  110. for _ in range(3):
  111. outs = fn_(tensor(data))
  112. ref_outs = (np.sort(data), np.argsort(data))
  113. assert len(ref_outs) == len(outs)
  114. for i in range(len(outs)):
  115. np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
  116. if is_symbolic is None:
  117. break
  118. def test_normalize():
  119. cases = [
  120. {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
  121. ]
  122. def np_normalize(x, p=2, axis=None, eps=1e-12):
  123. if axis is None:
  124. norm = np.sum(x ** p) ** (1.0 / p)
  125. else:
  126. norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
  127. return x / np.clip(norm, a_min=eps, a_max=np.inf)
  128. # # Test L-2 norm along all dimensions
  129. # opr_test(cases, F.normalize, ref_fn=np_normalize)
  130. # # Test L-1 norm along all dimensions
  131. # opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
  132. # Test L-2 norm along the second dimension
  133. opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
  134. # Test some norm == 0
  135. cases[0]["input"][0, 0, 0, :] = 0
  136. cases[1]["input"][0, 0, 0, :] = 0
  137. opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
  138. def test_sum_neg_axis():
  139. shape = (2, 3)
  140. data = np.random.random(shape).astype(np.float32)
  141. for axis in (-1, -2, (-2, 1), (-1, 0)):
  142. get = F.sum(tensor(data), axis=axis)
  143. ref = np.sum(data, axis=axis)
  144. np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
  145. with pytest.raises(AssertionError):
  146. F.sum(tensor(data), axis=(-1, 1))
  147. def test_non_finite():
  148. shape = (32, 3, 32, 32)
  149. data = np.random.random(shape).astype(np.float32)
  150. rst = F.math._check_non_finite(tensor(data))
  151. np.testing.assert_equal(rst.numpy(), [0])
  152. data[0][0][0][0] = float("inf")
  153. rst = F.math._check_non_finite(tensor(data))
  154. np.testing.assert_equal(rst.numpy(), [1])
  155. data[0][0][0][0] = float("nan")
  156. rst = F.math._check_non_finite(tensor(data))
  157. np.testing.assert_equal(rst.numpy(), [1])
  158. @pytest.mark.parametrize("descending", [True, False])
  159. @pytest.mark.parametrize("sorted", [True, False])
  160. @pytest.mark.parametrize("inp1d", [True, False])
  161. @pytest.mark.parametrize("kth_only", [True, False])
  162. def test_topk(descending, sorted, inp1d, kth_only):
  163. k = 3
  164. if inp1d:
  165. data = np.random.permutation(7)
  166. else:
  167. data = np.random.permutation(5 * 7).reshape(5, 7)
  168. data = data.astype(np.int32)
  169. def np_sort(x):
  170. if descending:
  171. return np.sort(x)[..., ::-1]
  172. return np.sort(x)
  173. res = F.topk(
  174. tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
  175. )
  176. values, indices = res
  177. values = values.numpy()
  178. indices = indices.numpy()
  179. if kth_only:
  180. np.testing.assert_equal(
  181. values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
  182. )
  183. np.testing.assert_equal(values, np_sort(data)[..., k - 1])
  184. else:
  185. np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
  186. if not sorted:
  187. values = np_sort(values)
  188. np.testing.assert_equal(values, np_sort(data)[..., :k])
  189. @pytest.mark.parametrize("is_trace", [True, False])
  190. def test_reduce_on_empty_tensor(is_trace):
  191. dtypes = [np.float32, np.int32, np.bool]
  192. inputs = [
  193. (np.random.random((0,)), None),
  194. (np.random.random((3, 0, 2)), 1),
  195. (np.random.random((10, 10, 0, 10)), 0),
  196. ]
  197. def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
  198. if is_trace:
  199. fn = jit.trace(symbolic=symbolic)(fn)
  200. for i in range(3):
  201. out = fn(tensor(input, dtype=dtype), axis=axis).numpy()
  202. out_ref = ref_fn(input.astype(dtype), axis=axis)
  203. np.testing.assert_equal(out, out_ref)
  204. for dtype in dtypes:
  205. for inp, axis in inputs:
  206. run_test(F.sum, np.sum, inp, dtype, axis, True)
  207. run_test(F.sum, np.sum, inp, dtype, axis, False)
  208. run_test(F.prod, np.prod, inp, dtype, axis, True)
  209. run_test(F.prod, np.prod, inp, dtype, axis, False)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台