You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_elemwise.py 9.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import numpy as np
  10. import pytest
  11. import megengine.functional as F
  12. import megengine.functional.elemwise as elemwise
  13. from megengine import tensor
  14. from megengine.core.tensor import dtype
  15. from megengine.functional.elemwise import Elemwise
  16. from megengine.jit import trace
  17. def test_abs():
  18. np.testing.assert_allclose(
  19. F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
  20. np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
  21. )
  22. np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
  23. def test_elemwise_mode_string():
  24. for key, mode in vars(Elemwise.Mode).items():
  25. if isinstance(mode, Elemwise.Mode):
  26. assert key == mode
  27. assert Elemwise(mode=key) == Elemwise(mode=mode)
  28. def test_multiply():
  29. np.testing.assert_allclose(
  30. F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
  31. )
  32. np.testing.assert_allclose(
  33. F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
  34. np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
  35. )
  36. np.testing.assert_allclose(
  37. F.mul(4.0, tensor([3.0, 4.0])).numpy(),
  38. np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
  39. )
  40. np.testing.assert_allclose(
  41. F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
  42. np.multiply(
  43. np.array([3.0, 4.0], dtype=np.float32),
  44. np.array([3.0, 4.0], dtype=np.float32),
  45. ),
  46. )
  47. def test_div():
  48. np.testing.assert_allclose(
  49. F.div(tensor([3, 4]), 2).numpy(),
  50. np.divide(np.array([3, 4], dtype=np.float32), 2),
  51. )
  52. np.testing.assert_allclose(
  53. (tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
  54. )
  55. def test_clamp():
  56. """Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
  57. `F.clip` will fall into wrong conditions unexpectedly.
  58. """
  59. x = np.linspace(-6, 6, dtype="float32")
  60. np.testing.assert_allclose(
  61. F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
  62. )
  63. np.testing.assert_allclose(
  64. F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
  65. )
  66. def test_isnan():
  67. for case in [[1, float("nan"), 0]]:
  68. np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
  69. def test_isinf():
  70. for case in [[1, float("inf"), 0]]:
  71. np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
  72. def test_sign():
  73. for case in [[1, -1, 0]]:
  74. x = tensor(case)
  75. np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
  76. def test_cosh():
  77. np.random.seed(42)
  78. x = np.random.randn(100).astype("float32")
  79. y_np = np.cosh(x)
  80. y_mge = F.cosh(tensor(x)).numpy()
  81. np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
  82. def test_sinh():
  83. np.random.seed(42)
  84. x = np.random.randn(100).astype("float32")
  85. y_np = np.sinh(x)
  86. y_mge = F.sinh(tensor(x)).numpy()
  87. np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
  88. def test_asinh():
  89. np.random.seed(42)
  90. x = np.random.randn(100).astype("float32")
  91. y_np = np.arcsinh(x)
  92. y_mge = F.asinh(tensor(x)).numpy()
  93. np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
  94. def test_acosh():
  95. x = np.arange(0, 10000).astype("float32") / 100 + 1
  96. y_np = np.arccosh(x)
  97. y_mge = F.acosh(tensor(x)).numpy()
  98. np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
  99. def test_atanh():
  100. np.random.seed(42)
  101. x = np.random.rand(100).astype("float32") * 2 - 1
  102. y_np = np.arctanh(x)
  103. y_mge = F.atanh(tensor(x)).numpy()
  104. np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
  105. def test_hswish():
  106. np.random.seed(42)
  107. x = np.random.randn(100).astype("float32")
  108. y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
  109. y_mge = F.hswish(tensor(x)).numpy()
  110. np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
  111. def test_hsigmoid():
  112. np.random.seed(42)
  113. x = np.random.randn(100).astype("float32")
  114. y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
  115. y_mge = F.hsigmoid(tensor(x)).numpy()
  116. np.testing.assert_equal(y_np, y_mge)
  117. def test_logical_oprs():
  118. x = np.array([[True, False], [False, True]])
  119. y = np.array([[True, True], [False, False]])
  120. xx = tensor(x)
  121. yy = tensor(y)
  122. np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
  123. np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
  124. np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
  125. np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
  126. def test_qadd():
  127. inp_scale = 0.5
  128. outp_scale = 0.2
  129. x = np.arange(6).reshape(2, 3).astype("float32")
  130. y = np.arange(6).reshape(2, 3).astype("float32")
  131. x = tensor(x, dtype=dtype.qint8(inp_scale))
  132. y = tensor(y, dtype=dtype.qint8(inp_scale))
  133. result_mge = F.elemwise._elemwise_multi_type(
  134. x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
  135. )
  136. result_mge = result_mge.astype("float32").numpy()
  137. result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
  138. np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)
  139. def test_int32_input():
  140. x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
  141. for op_name in elemwise.__all__:
  142. op = getattr(elemwise, op_name)
  143. nargs = op.__code__.co_argcount
  144. if op_name == "clip":
  145. inp = (x, 0, 1)
  146. elif op_name.endswith("_shift"):
  147. inp = (x, 1)
  148. elif op_name.startswith("logical_"):
  149. continue
  150. else:
  151. inp = (x,) * nargs
  152. y = op(*inp)
  153. y.numpy()
  154. @pytest.mark.parametrize("is_trace", [True, False])
  155. def test_empty_tensor(is_trace):
  156. binary_func = []
  157. unary_func = []
  158. for op_name in elemwise.__all__:
  159. op = getattr(elemwise, op_name)
  160. nargs = op.__code__.co_argcount
  161. if op_name == "clip":
  162. unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
  163. elif op_name.endswith("_shift"):
  164. unary_func.append(
  165. [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
  166. )
  167. elif op_name.startswith("logical_"): # logical_xxx op only accept boolean type
  168. if nargs == 1:
  169. unary_func.append(
  170. [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
  171. )
  172. else:
  173. assert nargs == 2
  174. binary_func.append(
  175. [
  176. op_name,
  177. lambda x, y, f=op: f(
  178. tensor(x.numpy(), dtype="bool"),
  179. tensor(y.numpy(), dtype="bool"),
  180. ),
  181. ]
  182. )
  183. elif nargs == 1:
  184. unary_func.append([op_name, op])
  185. elif nargs == 2:
  186. binary_func.append([op_name, op])
  187. else:
  188. print(nargs)
  189. raise NotImplementedError
  190. def run_test(func, args, ref_shape, is_trace, sym=False):
  191. args = [tensor(t, dtype="float32") for t in args]
  192. if is_trace:
  193. func = trace(symbolic=sym)(func)
  194. for _ in range(3):
  195. out = func(*args)
  196. assert out.numpy().shape == ref_shape
  197. else:
  198. out = func(*args)
  199. assert out.numpy().shape == ref_shape
  200. print(out.numpy().shape)
  201. inps = [
  202. np.array([]).astype("float32"),
  203. np.random.randn(2, 0, 3).astype("float32"),
  204. 123,
  205. ]
  206. for op_name, op in unary_func:
  207. if is_trace:
  208. for sym in [True, False]:
  209. run_test(op, [inps[0],], inps[0].shape, True, sym)
  210. run_test(op, [inps[1],], inps[1].shape, True, sym)
  211. else:
  212. run_test(op, [inps[0],], inps[0].shape, False)
  213. run_test(op, [inps[1],], inps[1].shape, False)
  214. for op_name, op in binary_func:
  215. if is_trace:
  216. for sym in [True, False]:
  217. run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, True, sym)
  218. run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, True, sym)
  219. run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, True, sym)
  220. run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, True, sym)
  221. else:
  222. run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, False)
  223. run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, False)
  224. run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, False)
  225. run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, False)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台