You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_math.py 5.5 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. from functools import partial
  10. import numpy as np
  11. from helpers import opr_test
  12. import megengine.functional as F
  13. from megengine.test import assertTensorClose
  14. def common_test_reduce(opr, ref_opr):
  15. data1_shape = (5, 6, 7)
  16. data2_shape = (2, 9, 12)
  17. data1 = np.random.random(data1_shape).astype(np.float32)
  18. data2 = np.random.random(data2_shape).astype(np.float32)
  19. cases = [{"input": data1}, {"input": data2}]
  20. if opr not in (F.argmin, F.argmax):
  21. # test default axis
  22. opr_test(cases, opr, ref_fn=ref_opr)
  23. # test all axises in range of input shape
  24. for axis in range(-3, 3):
  25. # test keepdims False
  26. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x, axis=axis), axis=axis)
  27. # test keepdims True
  28. opr_test(
  29. cases,
  30. opr,
  31. ref_fn=lambda x: ref_opr(x, axis=axis, keepdims=True),
  32. axis=axis,
  33. keepdims=True,
  34. )
  35. else:
  36. # test defaut axis
  37. opr_test(cases, opr, ref_fn=lambda x: ref_opr(x).astype(np.int32))
  38. # test all axises in range of input shape
  39. for axis in range(0, 3):
  40. opr_test(
  41. cases,
  42. opr,
  43. ref_fn=lambda x: ref_opr(x, axis=axis).astype(np.int32),
  44. axis=axis,
  45. )
  46. def test_sum():
  47. common_test_reduce(opr=F.sum, ref_opr=np.sum)
  48. def test_prod():
  49. common_test_reduce(opr=F.prod, ref_opr=np.prod)
  50. def test_mean():
  51. common_test_reduce(opr=F.mean, ref_opr=np.mean)
  52. def test_min():
  53. common_test_reduce(opr=F.min, ref_opr=np.min)
  54. def test_max():
  55. common_test_reduce(opr=F.max, ref_opr=np.max)
  56. def test_argmin():
  57. common_test_reduce(opr=F.argmin, ref_opr=np.argmin)
  58. def test_argmax():
  59. common_test_reduce(opr=F.argmax, ref_opr=np.argmax)
  60. def test_sqrt():
  61. d1_shape = (15,)
  62. d2_shape = (25,)
  63. d1 = np.random.random(d1_shape).astype(np.float32)
  64. d2 = np.random.random(d2_shape).astype(np.float32)
  65. cases = [{"input": d1}, {"input": d2}]
  66. opr_test(cases, F.sqrt, ref_fn=np.sqrt)
  67. def test_normalize():
  68. cases = [
  69. {"input": np.random.random((2, 3, 12, 12)).astype(np.float32)} for i in range(2)
  70. ]
  71. def np_normalize(x, p=2, axis=None, eps=1e-12):
  72. if axis is None:
  73. norm = np.sum(x ** p) ** (1.0 / p)
  74. else:
  75. norm = np.sum(x ** p, axis=axis, keepdims=True) ** (1.0 / p)
  76. return x / np.clip(norm, a_min=eps, a_max=np.inf)
  77. # Test L-2 norm along all dimensions
  78. opr_test(cases, F.normalize, ref_fn=np_normalize)
  79. # Test L-1 norm along all dimensions
  80. opr_test(cases, partial(F.normalize, p=1), ref_fn=partial(np_normalize, p=1))
  81. # Test L-2 norm along the second dimension
  82. opr_test(cases, partial(F.normalize, axis=1), ref_fn=partial(np_normalize, axis=1))
  83. # Test some norm == 0
  84. cases[0]["input"][0, 0, 0, :] = 0
  85. cases[1]["input"][0, 0, 0, :] = 0
  86. opr_test(cases, partial(F.normalize, axis=3), ref_fn=partial(np_normalize, axis=3))
  87. def test_logsumexp():
  88. x = np.arange(10).astype(np.float32)
  89. expected = np.log(np.sum(np.exp(x)))
  90. cases = [{"input": x, "output": expected}]
  91. compare_fn = partial(assertTensorClose, allow_special_values=True)
  92. # large value check
  93. n = 100
  94. x = np.full(n, 10000, dtype=np.float32)
  95. expected = 10000 + np.log(n)
  96. cases.append({"input": x, "output": expected.astype(np.float32)})
  97. opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  98. # special value check
  99. x = np.array([np.inf], dtype=np.float32)
  100. expected = x
  101. cases = [{"input": x, "output": expected}]
  102. x = np.array([-np.inf, 0.0], dtype=np.float32)
  103. expected = np.zeros(1).astype(np.float32)
  104. cases.append({"input": x, "output": expected})
  105. opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  106. x = np.array([np.nan], dtype=np.float32)
  107. expected = x
  108. cases = [{"input": x, "output": expected}]
  109. x = np.array([-np.inf, 1], dtype=np.float32)
  110. expected = np.array([1.0], dtype=np.float32)
  111. cases.append({"input": x, "output": expected})
  112. opr_test(cases, F.logsumexp, axis=0, compare_fn=compare_fn)
  113. # keepdims check
  114. x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
  115. expected = np.array([[1e10], [-1e10]], dtype=np.float32)
  116. cases = [{"input": x, "output": expected}]
  117. x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
  118. expected = np.array([[1e10], [np.inf]], dtype=np.float32)
  119. cases.append({"input": x, "output": expected})
  120. opr_test(cases, F.logsumexp, axis=1, keepdims=True, compare_fn=compare_fn)
  121. # multiple axes check
  122. x = np.array([[1e10, 1e-10], [-1e10, -np.inf]], dtype=np.float32)
  123. expected = np.array([1e10], dtype=np.float32)
  124. cases = [{"input": x, "output": expected}]
  125. x = np.array([[1e10, -1e-10, 1e-10], [1e10, 1e-10, np.inf]], dtype=np.float32)
  126. expected = np.array([np.inf], dtype=np.float32)
  127. cases.append({"input": x, "output": expected})
  128. opr_test(cases, F.logsumexp, axis=(0, 1), keepdims=False, compare_fn=compare_fn)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台