
test_functional.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
from helpers import opr_test

import megengine.functional as F
from megengine import Buffer, jit, tensor
from megengine.test import assertTensorClose


def test_flatten():
    data0_shape = (2, 3, 4, 5)
    data1_shape = (4, 5, 6, 7)
    data0 = np.random.random(data0_shape).astype(np.float32)
    data1 = np.random.random(data1_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    output0 = (2 * 3 * 4 * 5,)
    output1 = (4 * 5 * 6 * 7,)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn)

    output0 = (2, 3 * 4 * 5)
    output1 = (4, 5 * 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)

    output0 = (2, 3, 4 * 5)
    output1 = (4, 5, 6 * 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)

    output0 = (2, 3 * 4, 5)
    output1 = (4, 5 * 6, 7)
    cases = [{"input": data0, "output": output0}, {"input": data1, "output": output1}]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)


def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.int32)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.int32)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [{"input": [maskv0, xv0, yv0]}, {"input": [maskv1, xv1, yv1]}]
    opr_test(cases, F.where, ref_fn=np.where)


def test_eye():
    dtype = np.float32
    cases = [{"input": [10, 20]}, {"input": [20, 30]}]
    opr_test(cases, F.eye, ref_fn=lambda n, m: np.eye(n, m).astype(dtype), dtype=dtype)


def test_concat():
    def get_data_shape(length: int):
        return (length, 2, 3)

    data1 = np.random.random(get_data_shape(5)).astype("float32")
    data2 = np.random.random(get_data_shape(6)).astype("float32")
    data3 = np.random.random(get_data_shape(7)).astype("float32")

    def run(data1, data2):
        return F.concat([data1, data2])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))


def test_matrix_mul():
    shape1 = (2, 3)
    shape2 = (3, 4)
    shape3 = (4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    opr_test(cases, F.matrix_mul, ref_fn=np.matmul)


def test_batched_matrix_mul():
    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # Check the i-th slice of the batched result against the reference.
            # The original bare comparison discarded its result, so assert closeness instead.
            assertTensorClose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.batched_matrix_mul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )


def test_sort():
    data1_shape = (10, 3)
    data2_shape = (12, 2)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)
    output0 = [np.sort(data1), np.argsort(data1).astype(np.int32)]
    output1 = [np.sort(data2), np.argsort(data2).astype(np.int32)]

    cases = [
        {"input": data1, "output": output0},
        {"input": data2, "output": output1},
    ]
    opr_test(cases, F.sort)


def test_round():
    data1_shape = (15,)
    data2_shape = (25,)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)

    cases = [{"input": data1}, {"input": data2}]
    opr_test(cases, F.round, ref_fn=np.round)


def test_broadcast_to():
    input1_shape = (20, 30)
    output1_shape = (30, 20, 30)
    data1 = np.random.random(input1_shape).astype(np.float32)

    input2_shape = (10, 20)
    output2_shape = (20, 10, 20)
    data2 = np.random.random(input2_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.numpy().shape == y

    cases = [
        {"input": [data1, output1_shape], "output": output1_shape},
        {"input": [data2, output2_shape], "output": output2_shape},
    ]
    opr_test(cases, F.broadcast_to, compare_fn=compare_fn)


def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Buffer(v)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 1)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 2)

    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=tensor(0.9), beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)


def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Buffer(b)

    @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    z = Buffer(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    res = f(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res, b + 1)


def test_cross_entropy_with_softmax():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([1, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = F.cross_entropy(F.softmax(tensor(data1)), tensor(label1)).numpy()

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = F.cross_entropy(F.softmax(tensor(data2)), tensor(label2)).numpy()

    cases = [
        {"input": [data1, label1], "output": expect1},
        {"input": [data2, label2], "output": expect2},
    ]
    opr_test(cases, F.cross_entropy_with_softmax)


def test_cross_entropy():
    data1_shape = (1, 2)
    label1_shape = (1,)
    data2_shape = (1, 3)
    label2_shape = (1,)

    data1 = np.array([0.5, 0.5], dtype=np.float32).reshape(data1_shape)
    label1 = np.array([1], dtype=np.int32).reshape(label1_shape)
    expect1 = np.array([-np.log(0.5)], dtype=np.float32)

    data2 = np.array([0.3, 0.4, 0.3], dtype=np.float32).reshape(data2_shape)
    label2 = np.array([1], dtype=np.int32).reshape(label2_shape)
    expect2 = np.array([-np.log(0.4)], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1},
        {"input": [data2, label2], "output": expect2},
    ]
    opr_test(cases, F.cross_entropy)


def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1},
        {"input": [data2, label2], "output": expect2},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose between. To run GPU programs, make sure the machine actually has a GPU and that the driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
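As a quick sanity check before running code on a GPU, you can ask MegEngine whether a CUDA device is visible and pick a device accordingly. This is a minimal sketch, assuming megengine.is_cuda_available() and megengine.set_default_device() are present under those names in your installed release (the exact API may differ between MegEngine versions):

import megengine as mge

# Sketch: choose a device based on CUDA availability.
# Assumes mge.is_cuda_available() and mge.set_default_device() exist in this release.
if mge.is_cuda_available():
    mge.set_default_device("gpu0")  # run subsequent operators on the first GPU
    print("CUDA device detected, using gpu0")
else:
    mge.set_default_device("cpu0")  # fall back to CPU execution
    print("No CUDA device detected, using cpu0")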