
test_functional.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import itertools

import numpy as np
import pytest
from utils import opr_test

import megengine.core.ops.builtin as builtin
import megengine.core.tensor.dtype as dtype
import megengine.functional as F
from megengine import Parameter, Tensor, is_cuda_available, tensor
from megengine.core._trace_option import use_tensor_shape
from megengine.core.autodiff.grad import Grad
from megengine.core.tensor.utils import make_shape_tuple
from megengine.test import assertTensorClose


def test_where():
    maskv0 = np.array([[1, 0], [0, 1]], dtype=np.bool_)
    xv0 = np.array([[1, np.inf], [np.nan, 4]], dtype=np.float32)
    yv0 = np.array([[5, 6], [7, 8]], dtype=np.float32)

    maskv1 = np.array([[1, 0, 1], [1, 0, 0], [1, 1, 0]], dtype=np.bool_)
    xv1 = np.array([[1, np.inf, 2], [0, np.nan, 4], [1, 5, 7]], dtype=np.float32)
    yv1 = np.array([[5, 6, 9], [2, 7, 8], [2, 1, 9]], dtype=np.float32)

    cases = [
        {"input": [maskv0, xv0, yv0]},
        {"input": [maskv1, xv1, yv1]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)

    maskv2 = np.array([1, 1, 1], dtype=np.bool_)
    xv2 = np.array([1, 3, 2], dtype=np.float32)
    yv2 = np.array([5, 6, 9], dtype=np.float32)

    maskv3 = np.array([0, 0, 0], dtype=np.bool_)
    xv3 = np.array([1, 3, 2], dtype=np.float32)
    yv3 = np.array([5, 6, 9], dtype=np.float32)

    cases = [
        {"input": [maskv2, xv2, yv2]},
        {"input": [maskv3, xv3, yv3]},
    ]
    opr_test(cases, F.where, ref_fn=np.where)


def test_dropout():
    # with training=False dropout is a pass-through, so this is only a
    # smoke test that the op runs and produces sane values
    data = tensor(np.ones(10, dtype=np.float32))
    out = F.dropout(data, 1.0 / 3.0, training=False)
    assert out.numpy().sum() >= 0.0


def test_matmul():
    shape1 = 3
    shape2 = 3
    shape3 = (3, 5)
    shape4 = (5, 6)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")
    data4 = np.random.random(shape4).astype("float32")

    cases = [
        {"input": [data1, data2]},
        {"input": [data2, data3]},
        {"input": [data3, data4]},
    ]
    opr_test(cases, F.matmul, ref_fn=np.matmul)

    batch_size = 10
    shape1 = (batch_size, 2, 3)
    shape2 = (batch_size, 3, 4)
    shape3 = (batch_size, 10, 4, 5)
    data1 = np.random.random(shape1).astype("float32")
    data2 = np.random.random(shape2).astype("float32")
    data3 = np.random.random(shape3).astype("float32")

    cases = [{"input": [data1, data2]}, {"input": [data2, data3]}]
    for i in range(0, batch_size):

        def compare_fn(x, y):
            # check the i-th batch slice against the per-slice NumPy reference
            assert np.allclose(x.numpy()[i, ...], y)

        opr_test(
            cases,
            F.matmul,
            compare_fn=compare_fn,
            ref_fn=lambda x, y: np.matmul(x[i, ...], y[i, ...]),
        )
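
# For the batched cases above, F.matmul follows NumPy's matmul broadcasting:
# the trailing two dimensions are multiplied as matrices and the leading batch
# dimensions broadcast together, e.g. (10, 3, 4) @ (10, 10, 4, 5) yields
# (10, 10, 3, 5).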


def test_interpolate():
    def linear_interpolate():
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

        out = F.interpolate(inp, scale_factor=2.0, mode="LINEAR")
        out2 = F.interpolate(inp, 4, mode="LINEAR")

        assertTensorClose(
            out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )
        assertTensorClose(
            out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
        )

    def many_batch_interpolate():
        inp = tensor(np.arange(1, 9, dtype=np.float32).reshape(2, 1, 2, 2))

        out = F.interpolate(inp, [4, 4])
        out2 = F.interpolate(inp, scale_factor=2.0)

        assertTensorClose(out.numpy(), out2.numpy())

    def assign_corner_interpolate():
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

        out = F.interpolate(inp, [4, 4], align_corners=True)
        out2 = F.interpolate(inp, scale_factor=2.0, align_corners=True)

        assertTensorClose(out.numpy(), out2.numpy())

    def error_shape_linear_interpolate():
        # LINEAR mode expects a 3-D (N, C, W) input, so a 4-D input must raise
        inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))

        with pytest.raises(ValueError):
            F.interpolate(inp, scale_factor=2.0, mode="LINEAR")

    def inappropriate_scale_linear_interpolate():
        # LINEAR mode takes a scalar scale_factor, so a list must raise
        inp = tensor(np.arange(1, 3, dtype=np.float32).reshape(1, 1, 2))

        with pytest.raises(ValueError):
            F.interpolate(inp, scale_factor=[2.0, 3.0], mode="LINEAR")

    linear_interpolate()
    many_batch_interpolate()
    assign_corner_interpolate()
    error_shape_linear_interpolate()
    inappropriate_scale_linear_interpolate()


def _save_to(self, name="grad"):
    def callback(tensor, grad):
        setattr(self, name, grad)

    return callback


def _gen_roi_inp():
    inp_feat = np.random.randn(2, 32, 256, 256)
    rois = np.zeros((4, 5))
    rois[:, 0] = [0, 0, 1, 1]
    rois[:, 1:3] = np.random.rand(4, 2) * 100
    rois[:, 3:] = np.random.rand(4, 2) * 100 + 150

    inp_feat = tensor(inp_feat)
    rois = tensor(rois)
    return inp_feat, rois
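
# Each row of `rois` produced above is (batch_index, x0, y0, x1, y1):
# column 0 picks the image in the batch, columns 1:3 are the top-left corner,
# and columns 3:5 the bottom-right corner (offset by +150 so the boxes are
# never degenerate).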


def test_roi_align():
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))

    output_shape = (7, 7)
    out_feat = F.roi_align(
        inp_feat,
        rois,
        output_shape=output_shape,
        mode="average",
        spatial_scale=1.0 / 4,
        sample_points=2,
        aligned=True,
    )
    assert make_shape_tuple(out_feat.shape) == (
        rois.shape[0],
        inp_feat.shape[1],
        *output_shape,
    )

    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)


def test_roi_pooling():
    inp_feat, rois = _gen_roi_inp()
    grad = Grad().wrt(inp_feat, callback=_save_to(inp_feat))

    output_shape = (7, 7)
    out_feat = F.roi_pooling(
        inp_feat, rois, output_shape=output_shape, mode="max", scale=1.0 / 4,
    )
    assert make_shape_tuple(out_feat.shape) == (
        rois.shape[0],
        inp_feat.shape[1],
        *output_shape,
    )

    grad(out_feat, tensor(F.ones_like(out_feat)))
    assert make_shape_tuple(inp_feat.grad.shape) == make_shape_tuple(inp_feat.shape)


def test_one_hot():
    def onehot_low_dimension():
        inp = tensor(np.arange(1, 4, dtype=np.int32))
        out = F.one_hot(inp, num_classes=4)

        assertTensorClose(
            out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
        )

    def onehot_high_dimension():
        arr = np.array(
            [[3, 2, 4, 4, 2, 4, 0, 4, 4, 1], [4, 1, 1, 3, 2, 2, 4, 2, 4, 3]],
            dtype=np.int32,
        )

        inp = tensor(arr)
        out = F.one_hot(inp, 10)

        assertTensorClose(out.numpy(), np.eye(10, dtype=np.int32)[arr])

    onehot_low_dimension()
    onehot_high_dimension()


def test_add_update():
    shape = (2, 3)
    v = np.random.random(shape).astype(np.float32)
    b = Tensor(v)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 1)

    u = F.add_update(b, 1)
    assertTensorClose(u.numpy(), v + 2)

    x = np.ones((2, 2), dtype=np.float32)
    y = x * 0.5
    dest = tensor(x)
    delta = tensor(y)
    r = F.add_update(dest, delta, alpha=0.9, beta=0.1, bias=0.1)
    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)


def test_add_update_params():
    b = np.random.random((2, 3)).astype(np.float32)
    y = Tensor(b)

    # @jit.trace
    def f(x):
        return F.add_update(y, x)

    f(np.zeros((2, 3)).astype(np.float32))

    z = Tensor(np.zeros((2, 3)).astype(np.float32))
    F.add_update(y, z, beta=0.1)

    res = f(np.ones((2, 3)).astype(np.float32))
    assertTensorClose(res.numpy(), b + 1)
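
# A minimal sketch of the update rule exercised above, assuming the
# semantics dest <- alpha * dest + beta * delta + bias with `dest`
# modified in place (which is why calling add_update twice accumulates):
#
#     r = F.add_update(dest, delta, alpha=0.9, beta=0.1, bias=0.1)
#     # r.numpy() == dest_before * 0.9 + delta * 0.1 + 0.1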


def test_binary_cross_entropy():
    data1_shape = (2, 2)
    label1_shape = (2, 2)
    data2_shape = (2, 3)
    label2_shape = (2, 3)

    def sigmoid(x):
        return 1 / (1 + np.exp(-x))

    def compare_fn(x, y):
        assertTensorClose(x.numpy(), y, max_err=5e-4)

    np.random.seed(123)
    data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
    label1 = np.random.uniform(size=label1_shape).astype(np.float32)
    expect1 = np.array([0.6361], dtype=np.float32)

    np.random.seed(123)
    data2 = sigmoid(np.random.uniform(size=data2_shape).astype(np.float32))
    label2 = np.random.uniform(size=label2_shape).astype(np.float32)
    expect2 = np.array([0.6750], dtype=np.float32)

    cases = [
        {"input": [data1, label1], "output": expect1},
        {"input": [data2, label2], "output": expect2},
    ]
    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
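
# A NumPy reference for the expected values above, assuming
# F.binary_cross_entropy reduces with a mean over all elements:
def _np_binary_cross_entropy(pred, label):
    # elementwise -(y * log(p) + (1 - y) * log(1 - p)), averaged
    return -np.mean(label * np.log(pred) + (1 - label) * np.log(1 - pred))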


def test_hinge_loss():
    np.random.seed(123)
    # case with L1 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        # random labels in {-1, +1}
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = np.clip(1 - data * label, 0, np.inf).sum(axis=1).mean()
        cases.append({"input": [data, label], "output": expect})
    opr_test(cases, F.hinge_loss)

    # cases with L2 norm
    cases = []
    for shape in [(2, 2), (2, 3)]:
        data = np.random.uniform(size=shape).astype(np.float32)
        label = 2 * np.random.randint(0, 2, size=shape).astype(np.float32) - 1
        expect = (np.clip(1 - data * label, 0, np.inf) ** 2).sum(axis=1).mean()
        cases.append({"input": [data, label], "output": expect})

    def hinge_loss_with_l2_norm(pred, label):
        return F.hinge_loss(pred, label, "L2")

    opr_test(cases, hinge_loss_with_l2_norm)
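
# A NumPy reference matching the `expect` computation above: the standard
# hinge loss sums max(0, 1 - pred * label) per sample (squared for "L2")
# and averages over the batch:
def _np_hinge_loss(pred, label, norm="L1"):
    margin = np.maximum(0, 1 - pred * label)
    if norm == "L2":
        margin = margin ** 2
    return margin.sum(axis=1).mean()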


def test_nms():
    x = np.array(
        [
            [0, 0, 100, 100],
            [10, 10, 100, 100],
            [50, 50, 100, 100],
            [100, 100, 150, 150],
        ],
        dtype=np.float32,
    )
    inp = tensor(x)
    scores = tensor([0.5, 0.8, 0.9, 0.6], dtype=np.float32)
    result = F.nms(inp, scores=scores, iou_thresh=0.5)
    np.testing.assert_equal(result.numpy(), np.array([2, 1, 3], dtype=np.int32))


def test_batched_nms():
    x = np.array(
        [
            [0, 0, 100, 100],
            [0.5, 0.5, 1.5, 1.5],
            [20, 20, 100, 100],
            [0.5, 0.5, 1.0, 1.0],
            [10, 10, 100, 100],
            [0.5, 0.5, 1.0, 1.0],
        ],
        dtype=np.float32,
    )
    inp = tensor(x)
    scores = tensor([0.6, 0.9, 0.5, 0.6, 0.8, 0.7], dtype=np.float32)
    idxs = tensor([0, 1, 0, 1, 0, 1], dtype=np.int32)
    results = F.batched_nms(inp, scores=scores, idxs=idxs, iou_thresh=0.5)
    np.testing.assert_equal(results.numpy(), np.array([1, 4, 5], dtype=np.int32))
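
# batched_nms suppresses overlaps only within the same `idxs` group: the two
# interleaved sets of boxes above (idxs 0 and 1) are filtered independently,
# so a box never suppresses a box from the other group.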


@pytest.mark.skip(reason="cuda does not support nchw int8")
def test_conv_bias():
    inp_scale = 1.5
    w_scale = 2.5
    outp_scale = 1.5
    inp_dtype = dtype.qint8(inp_scale)
    w_dtype = dtype.qint8(w_scale)
    b_dtype = dtype.qint32(inp_scale * w_scale)
    out_dtype = dtype.qint8(outp_scale)

    def run(
        N, IC, OC, IH, IW, KH, KW, PH, PW, SH, SW,
        has_bias=True, nonlinear_mode="IDENTITY",
    ):
        inp_v = np.random.normal(size=(N, IC, IH, IW))
        w_v = np.random.normal(size=(OC, IC, KH, KW))
        b_v = np.random.normal(size=(1, OC, 1, 1))
        inp_scale = dtype.get_scale(inp_dtype)
        w_scale = dtype.get_scale(w_dtype)
        b_scale = dtype.get_scale(b_dtype)

        inpv = dtype.convert_to_qint8(inp_v * inp_scale, inp_dtype)
        wv = dtype.convert_to_qint8(w_v * w_scale, w_dtype)
        bv = dtype.convert_to_qint32(b_v * b_scale, b_dtype)

        inp_int8 = tensor(inpv, dtype=inp_dtype)
        w_int8 = Parameter(wv, dtype=w_dtype)
        b_int32 = Parameter(bv, dtype=b_dtype)

        inp_fp32 = inp_int8.astype("float32")
        w_fp32 = w_int8.astype("float32")
        b_fp32 = b_int32.astype("float32")

        def convert_to_nchw4(var):
            # (N, C, H, W) -> (N, C//4, H, W, 4)
            var = F.reshape(
                var, (var.shape[0], var.shape[1] // 4, 4, var.shape[2], var.shape[3])
            )
            var = F.transpose(var, (0, 1, 3, 4, 2))
            return var

        def run_conv2d(inp, w, b):
            O = F.conv2d(
                inp, w, b if has_bias else None, stride=(SH, SW), padding=(PH, PW),
            )
            if nonlinear_mode == "RELU":
                return F.relu(O)
            else:
                return O

        def run_conv_bias(inp, w, b, format="NCHW"):
            b = b if has_bias else Parameter(np.zeros_like(b.numpy()))
            if format == "NCHW4":
                inp = convert_to_nchw4(inp)
                w = convert_to_nchw4(w)
                b = convert_to_nchw4(b)
            return F.conv_bias_activation(
                inp,
                w,
                b,
                stride=(SH, SW),
                padding=(PH, PW),
                format=format,
                dtype=out_dtype,
                nonlinear_mode=nonlinear_mode,
            )

        format = "NCHW4" if is_cuda_available() else "NCHW"

        expected = run_conv2d(inp_fp32, w_fp32, b_fp32)
        expected = expected.astype(out_dtype).astype("float32")
        result = run_conv_bias(inp_int8, w_int8, b_int32, format=format).astype(
            "float32"
        )
        if format == "NCHW4":
            result = F.transpose(result, (0, 1, 4, 2, 3))
        expected = F.flatten(expected)
        result = F.flatten(result)
        assertTensorClose(result.numpy(), expected.numpy(), max_err=outp_scale)

    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False)

    run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1)
    run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1)
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2)

    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, False, "RELU")
    run(10, 36, 8, 46, 26, 2, 2, 2, 1, 1, 2, True, "RELU")
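
# A note on the quantization convention assumed above: convert_to_qint8
# presumably stores round(x / scale) as an int8 value, so the float data is
# pre-multiplied by its scale to spread it over the integer range, and
# max_err=outp_scale tolerates a one-quantization-step difference between
# the float and quantized pipelines.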


def test_condtake():
    x = np.array([[1, 2, 3], [4, 5, 6]])
    y = np.array([[True, False, True], [False, True, True]])
    xx = tensor(x)
    yy = tensor(y)
    val, idx = F.cond_take(yy, xx)
    np.testing.assert_equal(val.numpy(), x[y])
    np.testing.assert_equal(idx.numpy(), np.where(y.reshape(-1))[0])


def test_condtake_is_same():
    op1 = builtin.CondTake()
    op2 = builtin.CondTake()
    assert op1 == op2


def test_nms_is_same():
    op1 = builtin.NMSKeep(0.7, 100)
    op2 = builtin.NMSKeep(0.7, 100)
    op3 = builtin.NMSKeep(0.8, 100)
    op4 = builtin.NMSKeep(0.7, 200)
    assert op1 == op2
    assert op1 != op3
    assert op1 != op4
    assert op3 != op4
