You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

test_math.py 7.8 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. from functools import partial
  10. import numpy as np
  11. import pytest
  12. from utils import opr_test
  13. import megengine.functional as F
  14. from megengine import jit, tensor
  15. def common_test_reduce(opr, ref_opr):
  16. data1_shape = (5, 6, 7)
  17. data2_shape = (2, 9, 12)
  18. data1 = np.random.random(data1_shape).astype(np.float32)
  19. data2 = np.random.random(data2_shape).astype(np.float32)
  20. cases = [{"input": data1}, {"input": data2}]
  21. if opr not in (F.argmin, F.argmax):
  22. # test default axis
  23. opr_test(cases, opr, ref_fn=ref_opr)
  24. # test all axises in range of input shape
  25. for axis in range(-3, 3):
  26. # test keepdims False
  27. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
  28. # test keepdims True
  29. opr_test(
  30. cases,
  31. opr,
  32. ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
  33. axis=axis,
  34. keepdims=True,
  35. )
  36. else:
  37. # test defaut axis
  38. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
  39. # test all axises in range of input shape
  40. for axis in range(0, 3):
  41. opr_test(
  42. cases,
  43. opr,
  44. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  45. axis=axis,
  46. )
  47. # test negative axis
  48. axis = axis - len(data1_shape)
  49. opr_test(
  50. cases,
  51. opr,
  52. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  53. axis=axis,
  54. )
  55. def test_sum():
  56. common_test_reduce(opr=F.sum, ref_opr=np.sum)
  57. def test_prod():
  58. common_test_reduce(opr=F.prod, ref_opr=np.prod)
  59. def test_mean():
  60. common_test_reduce(opr=F.mean, ref_opr=np.mean)
  61. def test_var():
  62. common_test_reduce(opr=F.var, ref_opr=np.var)
  63. def test_std():
  64. common_test_reduce(opr=F.std, ref_opr=np.std)
  65. def test_min():
  66. common_test_reduce(opr=F.min, ref_opr=np.min)
  67. def test_max():
  68. common_test_reduce(opr=F.max, ref_opr=np.max)
  69. def test_argmin():
  70. common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
  71. def test_argmax():
  72. common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
  73. def test_sqrt():
  74. d1_shape = (15,)
  75. d2_shape = (25,)
  76. d1 = np.random.random(d1_shape).astype(np.float32)
  77. d2 = np.random.random(d2_shape).astype(np.float32)
  78. cases = [{"input": d1}, {"input": d2}]
  79. opr_test(cases, F.sqrt, ref_fn=np.sqrt)
  80. def test_sort():
  81. data1_shape = (10, 3)
  82. data2_shape = (12, 2)
  83. data1 = np.random.random(data1_shape).astype(np.float32)
  84. data2 = np.random.random(data2_shape).astype(np.float32)
  85. output1 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
  86. output2 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
  87. cases = [
  88. {"input": data1, "output": output1},
  89. {"input": data2, "output": output2},
  90. ]
  91. opr_test(cases, F.sort)
  92. @pytest.mark.parametrize("is_symbolic", [None, False, True])
  93. def test_sort_empty(is_symbolic):
  94. data_shapes = [
  95. (0,),
  96. (10, 0),
  97. ]
  98. def fn(x):
  99. return F.sort(x)
  100. for shape in data_shapes:
  101. if is_symbolic is not None:
  102. fn_ = jit.trace(symbolic=is_symbolic)(fn)
  103. else:
  104. fn_ = fn
  105. data = np.random.random(shape).astype(np.float32)
  106. for _ in range(3):
  107. outs = fn_(tensor(data))
  108. ref_outs = (np.sort(data), np.argsort(data))
  109. assert len(ref_outs) == len(outs)
  110. for i in range(len(outs)):
  111. np.testing.assert_equal(outs[i].numpy(), ref_outs[i])
  112. if is_symbolic is None:
  113. break
  114. def test_normalize():
  115. cases = [
  116. {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
  117. ]
  118. def np_normalize(x, p=2, axis=None, eps=1e-12):
  119. if axis is None:
  120. norm = np.sum(x ** p) ** (1.0 / p)
  121. else:
  122. norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
  123. return x / np.clip(norm, a_min=eps, a_max=np.inf)
  124. # # Test L-2 norm along all dimensions
  125. # opr_test(cases, F.normalize, ref_fn=np_normalize)
  126. # # Test L-1 norm along all dimensions
  127. # opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
  128. # Test L-2 norm along the second dimension
  129. opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
  130. # Test some norm == 0
  131. cases[0]["input"][0, 0, 0, :] = 0
  132. cases[1]["input"][0, 0, 0, :] = 0
  133. opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
  134. def test_sum_neg_axis():
  135. shape = (2, 3)
  136. data = np.random.random(shape).astype(np.float32)
  137. for axis in (-1, -2, (-2, 1), (-1, 0)):
  138. get = F.sum(tensor(data), axis=axis)
  139. ref = np.sum(data, axis=axis)
  140. np.testing.assert_allclose(get.numpy(), ref, rtol=1e-6)
  141. with pytest.raises(AssertionError):
  142. F.sum(tensor(data), axis=(-1, 1))
  143. def test_has_inf():
  144. shape = (32, 3, 32, 32)
  145. data = np.random.random(shape).astype(np.float32)
  146. rst = F.math._has_inf(tensor(data))
  147. np.testing.assert_equal(rst.numpy(), [0])
  148. data[0][0][0][0] = float("inf")
  149. rst = F.math._has_inf(tensor(data))
  150. np.testing.assert_equal(rst.numpy(), [1])
  151. @pytest.mark.parametrize("descending", [True, False])
  152. @pytest.mark.parametrize("sorted", [True, False])
  153. @pytest.mark.parametrize("inp1d", [True, False])
  154. @pytest.mark.parametrize("kth_only", [True, False])
  155. def test_topk(descending, sorted, inp1d, kth_only):
  156. k = 3
  157. if inp1d:
  158. data = np.random.permutation(7)
  159. else:
  160. data = np.random.permutation(5 * 7).reshape(5, 7)
  161. data = data.astype(np.int32)
  162. def np_sort(x):
  163. if descending:
  164. return np.sort(x)[..., ::-1]
  165. return np.sort(x)
  166. res = F.topk(
  167. tensor(data), k, descending=descending, no_sort=(not sorted), kth_only=kth_only
  168. )
  169. values, indices = res
  170. values = values.numpy()
  171. indices = indices.numpy()
  172. if kth_only:
  173. np.testing.assert_equal(
  174. values, np.take_along_axis(data, indices[..., None], -1).squeeze(-1)
  175. )
  176. np.testing.assert_equal(values, np_sort(data)[..., k - 1])
  177. else:
  178. np.testing.assert_equal(values, np.take_along_axis(data, indices, -1))
  179. if not sorted:
  180. values = np_sort(values)
  181. np.testing.assert_equal(values, np_sort(data)[..., :k])
  182. @pytest.mark.parametrize("is_trace", [True, False])
  183. def test_reduce_on_empty_tensor(is_trace):
  184. dtypes = [np.float32, np.int32, np.bool]
  185. inputs = [
  186. (np.random.random((0,)), None),
  187. (np.random.random((3, 0, 2)), 1),
  188. (np.random.random((10, 10, 0, 10)), 0),
  189. ]
  190. def run_test(fn, ref_fn, input, dtype, axis=None, symbolic=False):
  191. if is_trace:
  192. fn = jit.trace(symbolic=symbolic)(fn)
  193. for i in range(3):
  194. out = fn(tensor(input, dtype=dtype), axis=axis).numpy()
  195. out_ref = ref_fn(input.astype(dtype), axis=axis)
  196. np.testing.assert_equal(out, out_ref)
  197. for dtype in dtypes:
  198. for inp, axis in inputs:
  199. run_test(F.sum, np.sum, inp, dtype, axis, True)
  200. run_test(F.sum, np.sum, inp, dtype, axis, False)
  201. run_test(F.prod, np.prod, inp, dtype, axis, True)
  202. run_test(F.prod, np.prod, inp, dtype, axis, False)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台