
test_functional.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
from helpers import opr_test

import megengine.functional as F
from megengine import Buffer, jit, tensor
from megengine.test import assertTensorClose


def test_flatten():
    data0_shape = (2, 3, 4, 5)
    data1_shape = (4, 5, 6, 7)
    data0 = np.random.random(data0_shape).astype(np.float32)
    data1 = np.random.random(data1_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    output0 = (2 * 3 * 4 * 5,)
    output1 = (4 * 5 * 6 * 7,)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn)

    output0 = (2, 3 * 4 * 5)
    output1 = (4, 5 * 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)

    output0 = (2, 3, 4 * 5)
    output1 = (4, 5, 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)

    output0 = (2, 3 * 4, 5)
    output1 = (4, 5 * 6, 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)


def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [{"input": [maskv0, xv0, yv0]}, {"input": [maskv1, xv1, yv1]}]
    opr_test(cases, F.where, ref_fn=np.where)


def test_eye():
    dtype = np.float32
    cases = [{"input": [10, 20]}, {"input": [20, 30]}]
    opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)


def test_concat():
    def get_data_shape(length: int):
        return (length, 2, 3)

    data1 = np.random.random(get_data_shape(5)).astype("float32")
    data2 = np.random.random(get_data_shape(6)).astype("float32")
    data3 = np.random.random(get_data_shape(7)).astype("float32")

    def run(data1, data2):
        return F.concat([data1, data2])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))


def test_matrix_mul():
    shape1 = (2, 3)
    shape2 = (3, 4)
    shape3 = (4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    opr_test(cases, F.matrix_mul, ref_fn=np.matmul)
def test_batched_matrix_mul():
    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # compare the i-th slice of the batched result against the NumPy reference
            assertTensorClose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.batched_matrix_mul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
def test_sort():
    data1_shape = (10, 3)
    data2_shape = (12, 2)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
    output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]
    cases = [
        {"input": data1, "output": output0},
        {"input": data2, "output": output1},
    ]
    opr_test(cases, F.sort)


def test_round():
    data1_shape = (15,)
    data2_shape = (25,)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    cases = [{"input": data1}, {"input": data2}]
    opr_test(cases, F.round, ref_fn=np.round)


def test_broadcast_to():
    input1_shape = (20, 30)
    output1_shape = (30, 20, 30)
    data1 = np.random.random(input1_shape).astype(np.float32)

    input2_shape = (10, 20)
    output2_shape = (20, 10, 20)
    data2 = np.random.random(input2_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    cases = [
        {"input": [data1, output1_shape], "output": output1_shape},
        {"input": [data2, output2_shape], "output": output2_shape},
    ]
    opr_test(cases, F.broadcast_to, compare_fn=compare_fn)


def test_arange():
    cases = [
        {"input": [1, 9, 1]},
        {"input": [2, 10, 2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9, 1, -1]},
        {"input": [10, 2, -2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9.3, 1.2, -0.5]},
        {"input": [10.3, 2.1, -1.7]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )


def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Buffer(v)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 1)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 2)

    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)


def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Buffer(b)

    @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    z = Buffer(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    res = f(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res, b + 1)


def test_cross_entropy_with_softmax():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.cross_entropy_with_softmax)


def test_cross_entropy():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = np.array([-np.log(0.5)], dtype=np.float32)

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = np.array([-np.log(0.4)], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.cross_entropy)


def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1,},
        {"input": [data2, label2], "output": expect2,},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)

The MegEngine package already bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build to choose between. To run GPU programs, make sure the machine has a GPU device and a working driver installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
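As a quick sanity check before running GPU code, you can ask MegEngine whether a CUDA device is visible and pick a default device accordingly. The snippet below is only a minimal sketch: it assumes that is_cuda_available() and set_default_device() exist under these names in your installed MegEngine version, and that devices follow the "gpu0"/"cpu0" naming convention; consult your local API reference if they differ.

import megengine as mge

# Sketch only: these helpers and device names are assumptions about the
# installed MegEngine version, not guaranteed API.
if mge.is_cuda_available():
    mge.set_default_device("gpu0")   # place subsequent tensors/ops on the first GPU
    print("CUDA device found, running on gpu0")
else:
    mge.set_default_device("cpu0")   # identical code path, CPU only
    print("no CUDA device found, running on cpu0")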