
test_tracing.py 12 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import io
from tempfile import mkstemp

import numpy as np
import pytest

import megengine.core.tensor.megbrain_graph as G
import megengine.functional as F
import megengine.utils.comp_graph_tools as cgtools
from megengine import tensor
from megengine.core._trace_option import set_symbolic_shape
from megengine.core.ops import builtin as ops
from megengine.core.ops.builtin import Elemwise
from megengine.core.tensor.core import apply
from megengine.core.tensor.raw_tensor import as_raw_tensor
from megengine.core.tensor.utils import isscalar
from megengine.functional import exp, log
from megengine.jit import exclude_from_trace, trace
from megengine.random import normal, uniform


def test_trace():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic)
        def f(x):
            op = ops.Elemwise(Elemwise.Mode.NEGATE)
            (y,) = apply(op, x)
            return y

        x = as_raw_tensor([1]).numpy()
        y = f.__wrapped__(as_raw_tensor(x)).numpy()

        for i in range(3):
            np.testing.assert_equal(f(as_raw_tensor(x)).numpy(), y)


def test_exclude_from_trace():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic)
        def f(x):
            neg = ops.Elemwise(Elemwise.Mode.NEGATE)
            (x,) = apply(neg, x)
            with exclude_from_trace():
                if i % 2:
                    (x,) = apply(neg, x)
            (x,) = apply(neg, x)
            return x

        x = as_raw_tensor([1]).numpy()

        for i in range(3):
            y = f.__wrapped__(as_raw_tensor(x)).numpy()
            np.testing.assert_equal(f(as_raw_tensor(x)).numpy(), y)


def test_print_in_trace():
    for symbolic in [False]:  # cannot read value in symbolic mode

        @trace(symbolic=symbolic)
        def f(x):
            nonlocal buf
            neg = ops.Elemwise(Elemwise.Mode.NEGATE)
            (x,) = apply(neg, x)
            buf = x.numpy()
            (x,) = apply(neg, x)
            return x

        buf = None
        x = as_raw_tensor([1]).numpy()

        for i in range(3):
            y = f.__wrapped__(as_raw_tensor(x)).numpy()
            z = buf
            buf = None
            np.testing.assert_equal(f(as_raw_tensor(x)).numpy(), y)
            np.testing.assert_equal(z, buf)


def test_dump():
    @trace(symbolic=True, capture_as_const=True)
    def f(a, b):
        op = ops.Elemwise(Elemwise.Mode.ADD)
        (y,) = apply(op, a, b)
        return y

    a = as_raw_tensor([2]).numpy()
    b = as_raw_tensor([4]).numpy()
    y = f.__wrapped__(as_raw_tensor(a), as_raw_tensor(b)).numpy()

    for i in range(3):
        np.testing.assert_equal(f(as_raw_tensor(a), as_raw_tensor(b)).numpy(), y)

    file = io.BytesIO()
    dump_info = f.dump(file)
    assert dump_info.nr_opr == 3
    np.testing.assert_equal(dump_info.inputs, ["arg_0", "arg_1"])
    np.testing.assert_equal(dump_info.outputs, ["ADD(arg_0,arg_1)[4]"])
    file.seek(0)
    result = cgtools.load_and_inference(file, [a, b])
    np.testing.assert_equal(result[0], y)


def test_capture_dump():
    a = as_raw_tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        op = ops.Elemwise(Elemwise.Mode.MUL)
        (y,) = apply(op, x, a)
        return y

    x = as_raw_tensor([3]).numpy()
    y = f.__wrapped__(as_raw_tensor(x)).numpy()

    for i in range(3):
        np.testing.assert_equal(f(as_raw_tensor(x)).numpy(), y)

    file = io.BytesIO()
    f.dump(file)
    file.seek(0)
    result = cgtools.load_and_inference(file, [x])
    np.testing.assert_equal(result[0], y)


def test_dump_volatile():
    p = as_raw_tensor([2])

    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        op = ops.Elemwise(Elemwise.Mode.MUL)
        (y,) = apply(op, x, p)
        return y

    x = as_raw_tensor([3]).numpy()
    y = f.__wrapped__(as_raw_tensor(x)).numpy()

    for i in range(3):
        np.testing.assert_equal(f(as_raw_tensor(x)).numpy(), y)

    file = io.BytesIO()
    f.dump(file, optimize_for_inference=False)
    file.seek(0)
    cg, _, outputs = G.load_graph(file)
    (out,) = outputs
    assert (
        cgtools.get_owner_opr_type(cgtools.get_owner_opr_inputs(out)[1])
        == "ImmutableTensor"
    )


def test_trace_profiler():
    for symbolic in [False, True]:

        @trace(symbolic=symbolic, profiling=True)
        def f(x):
            op = ops.Elemwise(Elemwise.Mode.NEGATE)
            (y,) = apply(op, x)
            return y

        x = as_raw_tensor([1]).numpy()
        y = f.__wrapped__(as_raw_tensor(x)).numpy()

        f(as_raw_tensor(x))
        f(as_raw_tensor(x))  # XXX: has to run twice

        out = f.get_profile()
        assert out.get("profiler")


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        # directly return x / x will not trigger gopt
        # since there's no way to tell the two x are the same
        y = 2.0 * x
        return y / y

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        y = 2.0 * x
        return y / y

    d = tensor(0.0)
    assert not np.isfinite(f(d).numpy())
    np.testing.assert_equal(g(d).numpy().item(), 1.0)


@pytest.mark.skip(reason="force opt_level=0 when building graph")
def test_goptions_log_sum_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x, y):
        return log(exp(x) + exp(y))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x, y):
        return log(exp(x) + exp(y))

    val = 1.0e4
    d = tensor(val)
    o = tensor(0.0)
    assert not np.isfinite(f(d, o).numpy())
    np.testing.assert_almost_equal(g(d, o), val)


@pytest.mark.skip(reason="could not use opt_level=0 with dump")
def test_goptions_log_exp():
    @trace(symbolic=True, opt_level=0, capture_as_const=True)
    def f(x):
        return log(exp(x))

    @trace(symbolic=True, opt_level=1, capture_as_const=True)
    def g(x):
        return log(exp(x))

    f(tensor(1.0))
    _, out = mkstemp()
    f.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_1 = cgtools.get_oprs_seq(outputs)

    g(tensor(1.0))
    g.dump(out, optimize_for_inference=False)
    *_, outputs = G.load_graph(out)
    oprs_2 = cgtools.get_oprs_seq(outputs)

    assert len(oprs_1) - len(oprs_2) == 2


def test_optimize_for_inference():
    @trace(symbolic=True, capture_as_const=True)
    def f(x):
        return exp(x)

    _, out = mkstemp()
    f(tensor(5.0))
    f.dump(out, enable_io16xc32=True)

    res = G.load_graph(out)
    computing_input = res.output_vars_list[0].owner.inputs[0]
    assert computing_input.dtype == np.float16


def test_optimize_for_inference_broadcast():
    a = tensor(np.ones(1, dtype=np.float32))

    @trace(capture_as_const=True, symbolic_shape=True)
    def f():
        (b,) = apply(ops.Broadcast(), a, tensor([1, 10], dtype=np.int32))
        return b

    f()
    f.dump(io.BytesIO())


def test_trace_cvt_bool():
    x = tensor([0], dtype=np.int32)

    @trace(symbolic=True)
    def f(x):
        a = x.shape
        b = a[0]
        assert isscalar(b)
        return b == 0

    for i in range(3):
        np.testing.assert_equal(f(x).numpy(), False)


def test_trace_reshape():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(2, 10, 10))
        x2 = tensor(np.random.randn(4, 10, 10))
        x3 = tensor(np.random.randn(8, 10, 10))

        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = x.reshape(x.shape[0], 100)
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_topk():
    x = tensor([5, 2, 7, 1, 0, 3, 2])

    @trace(symbolic=True)
    def f(x):
        y = F.topk(x, 3)
        np.testing.assert_equal(y[0].shape.numpy(), np.array([3,]))
        return y

    for i in range(3):
        f(x)


def test_trace_warp_perspective():
    inp_shape = (1, 1, 4, 4)
    x = tensor(np.arange(16, dtype=np.float32).reshape(inp_shape))
    M_shape = (1, 3, 3)
    M = tensor(
        np.array(
            [[1.0, 0.0, 1.0], [0.0, 1.0, 1.0], [0.0, 0.0, 1.0]], dtype=np.float32
        ).reshape(M_shape)
    )

    @trace(symbolic=True)
    def f(x, M):
        out = F.warp_perspective(x, M, (2, 2))
        np.testing.assert_equal(out.shape.numpy(), np.array([1, 1, 2, 2]))
        return out

    for i in range(1):
        f(x, M)


def test_raise_on_trace():
    step_count = 0
    catch_count = 0
    bad_step = 10

    class CatchMe(Exception):
        pass

    a = tensor([1, 2, 3, 4])
    b = tensor([5, 6, 7, 8])
    c = tensor([9, 0, 1, 2])

    @trace
    def add_abc(a, b, c):
        print("Hello")
        ps = a + b
        result = ps + c
        if step_count == bad_step:
            raise CatchMe("catch me")
        return result

    for i in range(100):
        try:
            d = add_abc(a, b, c)
        except CatchMe as e:
            catch_count += 1
        else:
            np.testing.assert_equal(d.numpy(), (a + b + c).numpy())
        step_count += 1

    assert catch_count == 1


def test_trace_broadcast():
    for symbolic in [False, True]:
        x1 = tensor(np.random.randn(3, 1, 1))
        x2 = tensor(np.random.randn(1, 4, 1))
        x3 = tensor(np.random.randn(1, 1, 5))

        @trace(symbolic=symbolic, capture_as_const=True)
        def f(x):
            y = F.broadcast_to(x, (3, 4, 5))
            return y

        f(x1)
        f(x2)
        f(x3)


def test_trace_nms():
    def make_inputs(n):
        boxes = np.zeros((n, 4))
        boxes[:, :2] = np.random.rand(n, 2) * 100
        boxes[:, 2:] = np.random.rand(n, 2) * 100 + 100

        scores = np.random.rand(n)

        return tensor(boxes), tensor(scores)

    @trace(symbolic=False)
    def f(boxes, scores):
        results = F.nn.nms(boxes, scores=scores, iou_thresh=0.5, max_output=20)
        with exclude_from_trace():
            _ = F.nn.nms(boxes, scores=scores, iou_thresh=0.5)
        return results

    f(*make_inputs(10))
    f(*make_inputs(20))
    f(*make_inputs(30))


def test_trace_valid_broadcast():
    x1 = tensor(np.random.randn(1, 1))
    x2 = tensor(np.random.randn(1, 2))
    shape = (tensor([2]), tensor([2]))

    @trace(symbolic=False)
    def f(x, shape):
        y = F.broadcast_to(x, shape)
        return y

    f(x1, shape)
    f(x2, shape)


def test_clip():
    x = tensor(np.random.randn(10, 10))

    @trace(symbolic=True)
    def f(x, lower, upper):
        y = F.clip(x, lower, upper)
        return y

    for i in range(3):
        f(x, tensor([0]), tensor([1]))


# test returning noncontiguous tensor from trace
def test_slice():
    @trace
    def f(x):
        return x[:, 1::2]

    x = F.arange(8).reshape(2, 4)
    f(x)
    y = f(x)
    np.testing.assert_array_equal(y.numpy(), x.numpy()[:, 1::2])
    y + y


def test_random():
    def run_test(op):
        for symbolic_shape in [True, False]:

            @trace(symbolic=True, symbolic_shape=symbolic_shape)
            def f():
                out = op(size=[10, 10])
                out_shape = out.shape
                assert out_shape is not None
                if not isinstance(out_shape, tuple):
                    assert out.shape.numpy() is not None
                return out

            for _ in range(3):
                f()

    run_test(uniform)
    run_test(normal)

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. To run GPU programs, make sure the machine has a GPU and that its driver is installed. If you would like to try deep learning development on a cloud GPU computing platform, visit MegStudio.
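
To check at runtime whether the bundled CUDA environment can actually see a GPU before running traced code, you can query the device from Python. A minimal sketch, assuming the top-level is_cuda_available, set_default_device, and get_default_device helpers and the "gpu0"/"cpu0" device names of this MegEngine release:

import megengine as mge

# Pick a device before building or running any traced function.
if mge.is_cuda_available():
    mge.set_default_device("gpu0")  # first CUDA device
else:
    mge.set_default_device("cpu0")  # CPU fallback on machines without a GPU

print("running on:", mge.get_default_device())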