
test_tensor.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import os
import platform

import numpy as np
import pytest

# opr_test is the suite's local helper (imported from the test directory's utils module):
# it runs each case through the given op and compares the result via ref_fn / compare_fn.
from utils import opr_test

import megengine.functional as F
from megengine import tensor
from megengine.core._trace_option import use_symbolic_shape
from megengine.core.tensor.utils import astensor1d
from megengine.distributed.helper import get_device_count_by_fork


def test_eye():
    dtype = np.float32
    cases = [{"input": [10, 20]}, {"input": [30]}]
    for case in cases:
        np.testing.assert_allclose(
            F.eye(case["input"], dtype=dtype).numpy(),
            np.eye(*case["input"]).astype(dtype),
        )
        np.testing.assert_allclose(
            F.eye(*case["input"], dtype=dtype).numpy(),
            np.eye(*case["input"]).astype(dtype),
        )
        np.testing.assert_allclose(
            F.eye(tensor(case["input"]), dtype=dtype).numpy(),
            np.eye(*case["input"]).astype(dtype),
        )


def test_concat():
    def get_data_shape(length: int):
        return (length, 2, 3)

    data1 = np.random.random(get_data_shape(5)).astype("float32")
    data2 = np.random.random(get_data_shape(6)).astype("float32")
    data3 = np.random.random(get_data_shape(7)).astype("float32")

    def run(data1, data2):
        return F.concat([data1, data2])

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    opr_test(cases, run, ref_fn=lambda x, y: np.concatenate([x, y]))


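# F.concat with an explicit device argument should place the result on that device,
# even when the inputs live on different (CPU) devices.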
def test_concat_device():
    data1 = tensor(np.random.random((3, 2, 2)).astype("float32"), device="cpu0")
    data2 = tensor(np.random.random((2, 2, 2)).astype("float32"), device="cpu1")

    out = F.concat([data1, data2], device="cpu0")
    assert str(out.device).split(":")[0] == "cpu0"


def test_stack():
    data1 = np.random.random((3, 2, 2)).astype("float32")
    data2 = np.random.random((3, 2, 2)).astype("float32")
    data3 = np.random.random((3, 2, 2)).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data1, data3]}]
    for ai in range(3):

        def run(data1, data2):
            return F.stack([data1, data2], axis=ai)

        opr_test(cases, run, ref_fn=lambda x, y: np.stack([x, y], axis=ai))


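# F.split with an int splits the given axis into that many sections (the length-5 axis
# becomes chunks of size 3 and 2 here), while a list gives explicit split indices; both
# forms are checked against np.split(data, [3, 5], axis=3), and the two invalid requests
# at the end must raise ValueError.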
def test_split():
    data = np.random.random((2, 3, 4, 5)).astype(np.float32)
    inp = tensor(data)

    mge_out0 = F.split(inp, 2, axis=3)
    mge_out1 = F.split(inp, [3], axis=3)

    np_out = np.split(data, [3, 5], axis=3)

    assert len(mge_out0) == 2
    assert len(mge_out1) == 2

    np.testing.assert_equal(mge_out0[0].numpy(), np_out[0])
    np.testing.assert_equal(mge_out1[0].numpy(), np_out[0])

    np.testing.assert_equal(mge_out0[1].numpy(), np_out[1])
    np.testing.assert_equal(mge_out1[1].numpy(), np_out[1])

    try:
        F.split(inp, 4)
        assert False
    except ValueError as e:
        pass

    try:
        F.split(inp, [3, 3, 5], axis=3)
        assert False
    except ValueError as e:
        # the expected text must match the message raised by F.split verbatim
        # (including its spelling)
        assert str(e) == "Invalid nsplits_or_secions: [3, 3, 5]"


def test_reshape():
    x = np.arange(6, dtype="float32")
    xx = tensor(x)
    y = x.reshape(1, 2, 3)

    for shape in [
        (1, 2, 3),
        (1, -1, 3),
        (1, tensor(-1), 3),
        np.array([1, -1, 3], dtype="int32"),
        tensor([1, -1, 3]),
    ]:
        yy = F.reshape(xx, shape)
        np.testing.assert_equal(yy.numpy(), y)


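# reshape must work whether the input shape and the target shape are statically known
# (Python ints) or only available as tensors at runtime, including a -1 wildcard
# dimension; check_shape compares the resulting shape against the expected (2, 2).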
def test_reshape_shape_inference():
    x_shape_known = tensor([1, 2, 3, 4], dtype="float32")
    x_shape_unknown = F.broadcast_to(tensor([1.0]), shape=tensor([1, 1, 1, 1]).sum())
    tshp_unknown = astensor1d((tensor([2]), tensor([2])), x_shape_known)
    tshp_known = astensor1d((2, 2), x_shape_known)
    tshp_known_unspec = astensor1d((2, -1), x_shape_known)

    def check_shape(output, target):
        source = output.shape
        if isinstance(source, tensor):
            source = source.numpy()
        np.testing.assert_equal(source, target)

    def func(x, target_shape):
        return x.reshape(target_shape)

    cases = [
        {"input": [x_shape_known, tshp_unknown], "output": [(2, 2),]},
        {"input": [x_shape_unknown, tshp_unknown], "output": [(2, 2),]},
        {"input": [x_shape_known, tshp_known], "output": [(2, 2),]},
        {"input": [x_shape_known, tshp_known_unspec], "output": [(2, 2),]},
        {"input": [x_shape_unknown, tshp_known], "output": [(2, 2),]},
        {"input": [x_shape_unknown, tshp_known_unspec], "output": [(2, 2),]},
    ]
    opr_test(cases, func, compare_fn=check_shape, test_trace=True)


def test_squeeze():
    x = np.arange(6, dtype="float32").reshape(1, 2, 3, 1)
    xx = tensor(x)

    for axis in [None, 3, -4, (3, -4)]:
        y = np.squeeze(x, axis)
        yy = F.squeeze(xx, axis)
        np.testing.assert_equal(y, yy.numpy())


def test_expand_dims():
    x = np.arange(6, dtype="float32").reshape(2, 3)
    xx = tensor(x)

    for axis in [2, -3, (3, -4), (1, -4)]:
        y = np.expand_dims(x, axis)
        yy = F.expand_dims(xx, axis)
        np.testing.assert_equal(y, yy.numpy())


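# Mixing float32 and float16 operands (tensor with tensor, and tensor with ndarray)
# should give the same promoted result as the corresponding NumPy expression.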
def test_elemwise_dtype_promotion():
    x = np.random.rand(2, 3).astype("float32")
    y = np.random.rand(1, 3).astype("float16")
    xx = tensor(x)
    yy = tensor(y)
    z = xx * yy
    np.testing.assert_equal(z.numpy(), x * y)

    z = xx + y
    np.testing.assert_equal(z.numpy(), x + y)

    z = x - yy
    np.testing.assert_equal(z.numpy(), x - y)


def test_linspace():
    cases = [
        {"input": [1, 9, 9]},
        {"input": [3, 10, 8]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9, 1, 9]},
        {"input": [10, 3, 8]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [1, tensor(9), 9]},
        {"input": [tensor(1), 9, tensor(9)]},
    ]
    opr_test(
        cases,
        F.linspace,
        ref_fn=lambda start, end, step: np.linspace(1, 9, 9, dtype=np.float32),
    )


def test_arange():
    cases = [
        {"input": [1, 9, 1]},
        {"input": [2, 10, 2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9, 1, -1]},
        {"input": [10, 2, -2]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )

    cases = [
        {"input": [9.3, 1.2, -0.5]},
        {"input": [10.3, 2.1, -1.7]},
    ]
    opr_test(
        cases,
        F.arange,
        ref_fn=lambda start, end, step: np.arange(start, end, step, dtype=np.float32),
    )


def test_round():
    data1_shape = (15,)
    data2_shape = (25,)
    data1 = np.random.random(data1_shape).astype(np.float32)
    data2 = np.random.random(data2_shape).astype(np.float32)

    cases = [{"input": data1}, {"input": data2}]
    opr_test(cases, F.round, ref_fn=np.round)


def test_flatten():
    data0_shape = (2, 3, 4, 5)
    data1_shape = (4, 5, 6, 7)
    data0 = np.random.random(data0_shape).astype(np.float32)
    data1 = np.random.random(data1_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.shape[0] == y

    output0 = (2 * 3 * 4 * 5,)
    output1 = (4 * 5 * 6 * 7,)
    cases = [
        {"input": data0, "output": output0},
        {"input": data1, "output": output1},
    ]
    opr_test(cases, F.flatten, compare_fn=compare_fn)

    output0 = (2, 3 * 4 * 5)
    output1 = (4, 5 * 6 * 7)
    cases = [
        {"input": data0, "output": output0},
        {"input": data1, "output": output1},
    ]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1)

    output0 = (2, 3, 4 * 5)
    output1 = (4, 5, 6 * 7)
    cases = [
        {"input": data0, "output": output0},
        {"input": data1, "output": output1},
    ]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=2)

    output0 = (2, 3 * 4, 5)
    output1 = (4, 5 * 6, 7)
    cases = [
        {"input": data0, "output": output0},
        {"input": data1, "output": output1},
    ]
    opr_test(cases, F.flatten, compare_fn=compare_fn, start_axis=1, end_axis=2)


def test_broadcast():
    input1_shape = (20, 30)
    output1_shape = (30, 20, 30)
    data1 = np.random.random(input1_shape).astype(np.float32)

    input2_shape = (10, 1)
    output2_shape = (20, 10, 20)
    data2 = np.random.random(input2_shape).astype(np.float32)

    input3_shape = (10, 10)
    output3_shape = (10, 10)
    data3 = np.random.random(input3_shape).astype(np.float32)

    def compare_fn(x, y):
        assert x.shape[0] == y

    cases = [
        {"input": [data1, output1_shape], "output": output1_shape},
        {"input": [data2, output2_shape], "output": output2_shape},
        {"input": [data3, output3_shape], "output": output3_shape},
    ]
    opr_test(cases, F.broadcast_to, compare_fn=compare_fn)

    x = F.ones((2, 1, 3))
    with pytest.raises(RuntimeError):
        F.broadcast_to(x, (2, 3, 4))

    with pytest.raises(RuntimeError):
        F.broadcast_to(x, (4, 1, 3))

    with pytest.raises(RuntimeError):
        F.broadcast_to(x, (1, 3))


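# astensor1d should accept a Python list, a NumPy array, an existing tensor, or a
# mixed list containing tensors, and always return a 1-d tensor with the expected values.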
def test_utils_astensor1d():
    reference = tensor(0)

    # literal
    x = [1, 2, 3]
    for dtype in [None, "float32"]:
        xx = astensor1d(x, reference, dtype=dtype)
        assert type(xx) is tensor
        np.testing.assert_equal(xx.numpy(), x)

    # numpy array
    x = np.asarray([1, 2, 3], dtype="int32")
    for dtype in [None, "float32"]:
        xx = astensor1d(x, reference, dtype=dtype)
        assert type(xx) is tensor
        np.testing.assert_equal(xx.numpy(), x.astype(dtype) if dtype else x)

    # tensor
    x = tensor([1, 2, 3], dtype="int32")
    for dtype in [None, "float32"]:
        xx = astensor1d(x, reference, dtype=dtype)
        assert type(xx) is tensor
        np.testing.assert_equal(xx.numpy(), x.numpy())

    # mixed
    x = [1, tensor(2), 3]
    for dtype in [None, "float32"]:
        xx = astensor1d(x, reference, dtype=dtype)
        assert type(xx) is tensor
        np.testing.assert_equal(xx.numpy(), [1, 2, 3])


def test_device():
    x = tensor([1, 2, 3], dtype="float32")

    y1 = F.eye(x.shape, dtype="float32")
    y2 = F.eye(x.shape, dtype="float32", device=None)
    np.testing.assert_almost_equal(y1.numpy(), y2.numpy())

    y3 = F.eye(x.shape, dtype="float32", device="xpux")
    y4 = F.eye(x.shape, dtype="float32", device=x.device)
    np.testing.assert_almost_equal(y3.numpy(), y4.numpy())

    y5 = F.full((3, 2), 4, device=x.device)
    y6 = F.full((3, 2), 4, device="xpux")
    np.testing.assert_almost_equal(y5.numpy(), y6.numpy())


def test_identity():
    x = tensor(np.random.random((5, 10)).astype(np.float32))
    y = F.copy(x)
    np.testing.assert_equal(y.numpy(), x)


def copy_test(dst, src):
    data = np.random.random((2, 3)).astype(np.float32)
    x = tensor(data, device=src)
    y = F.copy(x, dst)
    assert np.allclose(data, y.numpy())
    z = x.to(dst)
    assert np.allclose(data, z.numpy())


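# The require_ngpu marker (presumably registered in this test suite's pytest
# configuration) gates the following copy tests on the number of GPUs available.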
@pytest.mark.require_ngpu(1)
def test_copy_h2d():
    copy_test("cpu0", "gpu0")


@pytest.mark.require_ngpu(1)
def test_copy_d2h():
    copy_test("gpu0", "cpu0")


@pytest.mark.require_ngpu(2)
def test_copy_d2d():
    copy_test("gpu0", "gpu1")
    copy_test("gpu0:0", "gpu0:1")


@pytest.mark.parametrize(
    "shape, repeats, axis",
    [
        ((2,), 2, 0),
        ((2, 3, 4, 5), 3, 0),
        ((2, 3, 4, 5), 4, 3),
        ((2,), 2, None),
        ((2, 3, 4, 5), 3, None),
        ((), 1, None),
        ((), 10, None),
    ],
)
def test_repeat(shape, repeats, axis):
    def repeat_func(inp):
        return F.repeat(inp=inp, repeats=repeats, axis=axis)

    if shape != ():
        cases = [
            {"input": np.random.randn(*shape).astype("float32")},
        ]
    else:
        cases = [{"input": np.array(1.23)}]

    opr_test(
        cases, repeat_func, ref_fn=lambda inp: np.repeat(inp, repeats, axis),
    )


@pytest.mark.parametrize(
    "shape, reps",
    [
        ((2,), (2,)),
        ((2, 3, 4, 5), (1, 1, 1, 1)),
        ((2, 3, 4, 5), (1, 2, 3, 4)),
        ((2, 3, 4, 5), (2, 2, 2, 2, 2, 2, 2)),
    ],
)
def test_tile(shape, reps):
    def tile_func(inp):
        return F.tile(inp=inp, reps=reps)

    cases = [
        {"input": np.random.randn(*shape).astype("float32")},
    ]

    opr_test(
        cases, tile_func, ref_fn=lambda inp: np.tile(inp, reps),
    )
