
test_tensor.py 8.8 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import functools

import numpy as np

from megenginelite import *


def require_cuda(func):
    """a decorator that disables a testcase if cuda is not enabled"""

    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        if LiteGlobal.get_device_count(LiteDeviceType.LITE_CUDA):
            return func(*args, **kwargs)

    return wrapped


def test_tensor_make():
    # a default-constructed layout is empty and defaults to float32
    empty_layout = LiteLayout()
    assert empty_layout.ndim == 0
    assert empty_layout.data_type == int(LiteDataType.LITE_FLOAT)

    empty_tensor = LiteTensor()
    assert empty_tensor.layout.ndim == empty_layout.ndim
    assert empty_tensor.layout.data_type == empty_layout.data_type

    # a layout can be built from a shape plus a dtype string, a numpy
    # dtype, or a LiteDataType enum value
    layout = LiteLayout([4, 16])
    layout = LiteLayout(dtype="float32")
    layout = LiteLayout([4, 16], "float32")
    layout = LiteLayout([4, 16], "float16")
    layout = LiteLayout([4, 16], np.float32)
    layout = LiteLayout([4, 16], np.int8)
    layout = LiteLayout([4, 16], LiteDataType.LITE_FLOAT)

    tensor = LiteTensor(layout)
    tensor = LiteTensor(layout, LiteDeviceType.LITE_CPU)
    assert tensor.layout == layout
    assert tensor.device_type == LiteDeviceType.LITE_CPU
    assert tensor.is_continue == True
    assert tensor.is_pinned_host == False
    assert tensor.nbytes == 4 * 16 * 4
    assert tensor.device_id == 0

    tensor = LiteTensor(layout, device_id=1)
    assert tensor.device_id == 1


def test_tensor_set_data():
    layout = LiteLayout([2, 16], "int8")
    tensor = LiteTensor(layout)
    assert tensor.nbytes == 2 * 16

    data = [i for i in range(32)]
    tensor.set_data_by_copy(data)
    real_data = tensor.to_numpy()
    for i in range(32):
        assert real_data[i // 16][i % 16] == i

    arr = np.ones([2, 16], "int8")
    tensor.set_data_by_copy(arr)
    real_data = tensor.to_numpy()
    for i in range(32):
        assert real_data[i // 16][i % 16] == 1

    for i in range(32):
        arr[i // 16][i % 16] = i
    # share the numpy buffer instead of copying it: later writes to arr
    # are visible through the tensor
    tensor.set_data_by_share(arr)
    real_data = tensor.to_numpy()
    for i in range(32):
        assert real_data[i // 16][i % 16] == i

    arr[0][8] = 100
    arr[1][3] = 20
    real_data = tensor.to_numpy()
    assert real_data[0][8] == 100
    assert real_data[1][3] == 20


def test_fill_zero():
    layout = LiteLayout([4, 8], "int16")
    tensor1 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 2

    tensor1.set_data_by_copy([i for i in range(32)])
    real_data = tensor1.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i

    tensor1.fill_zero()
    real_data = tensor1.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == 0


def test_copy_from():
    layout = LiteLayout([4, 8], "int16")
    tensor1 = LiteTensor(layout)
    tensor2 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 2
    assert tensor2.nbytes == 4 * 8 * 2

    tensor1.set_data_by_copy([i for i in range(32)])
    tensor2.copy_from(tensor1)
    real_data = tensor2.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i

    tensor1.set_data_by_copy([i + 5 for i in range(32)])
    tensor2.copy_from(tensor1)
    real_data = tensor2.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i + 5


def test_reshape():
    layout = LiteLayout([4, 8], "int16")
    tensor1 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 2

    tensor1.set_data_by_copy([i for i in range(32)])
    real_data = tensor1.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i

    tensor1.reshape([8, 4])
    real_data = tensor1.to_numpy()
    for i in range(32):
        assert real_data[i // 4][i % 4] == i


def test_slice():
    layout = LiteLayout([4, 8], "int32")
    tensor1 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 4

    tensor1.set_data_by_copy([i for i in range(32)])
    real_data_org = tensor1.to_numpy()
    for i in range(32):
        assert real_data_org[i // 8][i % 8] == i

    # rows 1:3 and columns 4:8 of the original tensor, a non-contiguous
    # 2x4 view
    tensor2 = tensor1.slice([1, 4], [3, 8])
    assert tensor2.layout.shapes[0] == 2
    assert tensor2.layout.shapes[1] == 4
    assert tensor2.is_continue == False

    real_data = tensor2.to_numpy()
    for i in range(8):
        row = i // 4
        col = i % 4
        assert real_data[row][col] == real_data_org[row + 1][col + 4]


def test_tensor_share_memory():
    layout = LiteLayout([4, 8], "int16")
    tensor1 = LiteTensor(layout)
    tensor2 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 2
    assert tensor2.nbytes == 4 * 8 * 2

    tensor1.set_data_by_copy([i for i in range(32)])
    tensor2.share_memory_with(tensor1)
    real_data = tensor2.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i

    # both tensors alias the same storage, so updating tensor1 is
    # observable through tensor2
    tensor1.set_data_by_copy([i + 5 for i in range(32)])
    real_data = tensor2.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i + 5


def test_tensor_share_ctype_memory():
    layout = LiteLayout([4, 8], "int16")
    tensor1 = LiteTensor(layout)
    assert tensor1.nbytes == 4 * 8 * 2

    arr = np.ones([4, 8], "int16")
    for i in range(32):
        arr[i // 8][i % 8] = i
    # share a raw ctypes pointer together with its length in bytes
    tensor1.set_data_by_share(arr.ctypes.data, 4 * 8 * 2)
    real_data = tensor1.to_numpy()
    for i in range(32):
        assert real_data[i // 8][i % 8] == i


@require_cuda
def test_tensor_share_ctype_memory_device():
    layout = LiteLayout([4, 8], "int16")
    tensor_cpu = LiteTensor(
        layout=layout, device_type=LiteDeviceType.LITE_CUDA, is_pinned_host=True
    )
    tensor_cuda1 = LiteTensor(layout=layout, device_type=LiteDeviceType.LITE_CUDA)
    tensor_cuda2 = LiteTensor(layout=layout, device_type=LiteDeviceType.LITE_CUDA)
    assert tensor_cpu.nbytes == 4 * 8 * 2
    assert tensor_cuda1.nbytes == 4 * 8 * 2
    assert tensor_cuda2.nbytes == 4 * 8 * 2

    arr = np.ones([4, 8], "int16")
    for i in range(32):
        arr[i // 8][i % 8] = i
    tensor_cpu.set_data_by_share(arr.ctypes.data, 4 * 8 * 2)
    tensor_cuda1.copy_from(tensor_cpu)

    # share tensor_cuda1's device memory into tensor_cuda2
    device_mem = tensor_cuda1.get_ctypes_memory()
    tensor_cuda2.set_data_by_share(device_mem, tensor_cuda1.nbytes)
    real_data1 = tensor_cuda1.to_numpy()
    real_data2 = tensor_cuda2.to_numpy()
    for i in range(32):
        assert real_data1[i // 8][i % 8] == i
        assert real_data2[i // 8][i % 8] == i


def test_tensor_share_memory_with():
    layout = LiteLayout([4, 32], "int16")
    tensor = LiteTensor(layout)
    assert tensor.nbytes == 4 * 32 * 2

    arr = np.ones([4, 32], "int16")
    for i in range(128):
        arr[i // 32][i % 32] = i
    tensor.set_data_by_share(arr)
    real_data = tensor.to_numpy()
    for i in range(128):
        assert real_data[i // 32][i % 32] == i

    tensor2 = LiteTensor(layout)
    tensor2.share_memory_with(tensor)
    real_data = tensor.to_numpy()
    real_data2 = tensor2.to_numpy()
    for i in range(128):
        assert real_data[i // 32][i % 32] == i
        assert real_data2[i // 32][i % 32] == i

    # writes to the shared numpy buffer propagate through both tensors
    arr[1][18] = 5
    arr[3][7] = 345
    real_data = tensor2.to_numpy()
    assert real_data[1][18] == 5
    assert real_data[3][7] == 345


def test_empty_tensor():
    empty_tensor = LiteTensor()
    assert empty_tensor.layout.ndim == 0
    assert empty_tensor.layout.data_type == int(LiteDataType.LITE_FLOAT)
    # check empty tensor to numpy
    data = empty_tensor.to_numpy()


def test_tensor_by_set_copy_with_new_layout():
    layout = LiteLayout([4, 32], "int16")
    tensor = LiteTensor(layout)
    assert tensor.nbytes == 4 * 32 * 2

    # setting data whose shape or dtype differs from the current layout
    # replaces the tensor's layout
    arr = np.ones([8, 64], "int32")
    tensor.set_data_by_copy(arr)
    new_layout = tensor.layout
    assert new_layout.ndim == 2
    assert new_layout.shapes[0] == 8
    assert new_layout.shapes[1] == 64

    tensor = LiteTensor(layout)
    tensor.set_data_by_share(arr)
    new_layout = tensor.layout
    assert new_layout.ndim == 2
    assert new_layout.shapes[0] == 8
    assert new_layout.shapes[1] == 64


def test_tensor_concat():
    layout = LiteLayout([4, 32], "int16")
    tensors = []
    arr = np.ones([4, 32], "int16")
    # build four tensors filled with the constants 0, 1, 2, 3
    for j in range(4):
        for i in range(128):
            arr[i // 32][i % 32] = j
        tensor = LiteTensor(layout)
        tensor.set_data_by_copy(arr)
        tensors.append(tensor)
    # concatenate along axis 0, giving a [16, 32] tensor
    new_tensor = LiteTensorConcat(tensors, 0)

    real_data = new_tensor.to_numpy()
    for j in range(4):
        for i in range(128):
            index = j * 128 + i
            assert real_data[index // 32][index % 32] == j
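For quick reference, here is a minimal standalone sketch distilled from the tests above, assuming the megenginelite package is installed; it exercises the same LiteLayout/LiteTensor round trip that test_tensor_set_data covers:

import numpy as np

from megenginelite import LiteLayout, LiteTensor

# describe a 2x16 int8 tensor and allocate it on the default CPU device
layout = LiteLayout([2, 16], "int8")
tensor = LiteTensor(layout)

# copy host data in, then read it back out as a numpy array
tensor.set_data_by_copy(np.arange(32, dtype="int8").reshape(2, 16))
print(tensor.to_numpy())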

The MegEngine package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. If you want to run GPU programs, make sure the machine has a GPU device and the driver installed. If you would like to try deep-learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
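To check whether the GPU path is available before running the CUDA tests, a minimal sketch using the same LiteGlobal query as the require_cuda decorator above:

from megenginelite import LiteDeviceType, LiteGlobal

# count the CUDA devices visible to the Lite runtime; zero means the
# tests decorated with @require_cuda above are silently skipped
cuda_count = LiteGlobal.get_device_count(LiteDeviceType.LITE_CUDA)
print("CUDA devices visible to MegEngine Lite:", cuda_count)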