You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_elemwise.py 9.2 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277
  1. # -*- coding: utf-8 -*-
  2. # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  3. #
  4. # Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  5. #
  6. # Unless required by applicable law or agreed to in writing,
  7. # software distributed under the License is distributed on an
  8. # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9. import numpy as np
  10. import pytest
  11. import megengine.functional as F
  12. import megengine.functional.elemwise as elemwise
  13. from megengine import tensor
  14. from megengine.core.tensor import dtype
  15. from megengine.functional.elemwise import Elemwise
  16. from megengine.jit import trace
  17. def test_abs():
  18. np.testing.assert_allclose(
  19. F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
  20. np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
  21. )
  22. np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
  23. def test_elemwise_mode_string():
  24. for key, mode in vars(Elemwise.Mode).items():
  25. if isinstance(mode, Elemwise.Mode):
  26. assert key == mode
  27. assert Elemwise(mode=key) == Elemwise(mode=mode)
  28. def test_multiply():
  29. np.testing.assert_allclose(
  30. F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
  31. )
  32. np.testing.assert_allclose(
  33. F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
  34. np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
  35. )
  36. np.testing.assert_allclose(
  37. F.mul(4.0, tensor([3.0, 4.0])).numpy(),
  38. np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
  39. )
  40. np.testing.assert_allclose(
  41. F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
  42. np.multiply(
  43. np.array([3.0, 4.0], dtype=np.float32),
  44. np.array([3.0, 4.0], dtype=np.float32),
  45. ),
  46. )
  47. def test_div():
  48. np.testing.assert_allclose(
  49. F.div(tensor([3, 4]), 2).numpy(),
  50. np.divide(np.array([3, 4], dtype=np.float32), 2),
  51. )
  52. np.testing.assert_allclose(
  53. (tensor([3, 4]) / 2).numpy(), np.divide(np.array([3, 4], dtype=np.float32), 2),
  54. )
  55. def test_clamp():
  56. """Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
  57. `F.clip` will fall into wrong conditions unexpectedly.
  58. """
  59. x = np.linspace(-6, 6, dtype="float32")
  60. np.testing.assert_allclose(
  61. F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
  62. )
  63. np.testing.assert_allclose(
  64. F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
  65. )
  66. def test_isnan():
  67. for case in [[1, float("nan"), 0]]:
  68. np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))
  69. def test_isinf():
  70. for case in [[1, float("inf"), 0]]:
  71. np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))
  72. def test_sign():
  73. for case in [[1, -1, 0]]:
  74. x = tensor(case)
  75. np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
  76. def test_cosh():
  77. np.random.seed(42)
  78. x = np.random.randn(100).astype("float32")
  79. y_np = np.cosh(x)
  80. y_mge = F.cosh(tensor(x)).numpy()
  81. np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
  82. def test_sinh():
  83. np.random.seed(42)
  84. x = np.random.randn(100).astype("float32")
  85. y_np = np.sinh(x)
  86. y_mge = F.sinh(tensor(x)).numpy()
  87. np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)
  88. def test_asinh():
  89. np.random.seed(42)
  90. x = np.random.randn(100).astype("float32")
  91. y_np = np.arcsinh(x)
  92. y_mge = F.asinh(tensor(x)).numpy()
  93. np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
  94. def test_acosh():
  95. x = np.arange(0, 10000).astype("float32") / 100 + 1
  96. y_np = np.arccosh(x)
  97. y_mge = F.acosh(tensor(x)).numpy()
  98. np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
  99. def test_atanh():
  100. np.random.seed(42)
  101. x = np.random.rand(100).astype("float32") * 2 - 1
  102. y_np = np.arctanh(x)
  103. y_mge = F.atanh(tensor(x)).numpy()
  104. np.testing.assert_almost_equal(y_np, y_mge, decimal=5)
  105. def test_hswish():
  106. np.random.seed(42)
  107. x = np.random.randn(100).astype("float32")
  108. y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
  109. y_mge = F.hswish(tensor(x)).numpy()
  110. np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
  111. def test_silu():
  112. x = np.array([-1.5, 0.0, 1.0, 1.5]).astype("float32")
  113. y_np = x / (1 + np.exp(-x))
  114. y_mge = F.silu(tensor(x)).numpy()
  115. np.testing.assert_almost_equal(y_np, y_mge, decimal=6)
  116. def test_hsigmoid():
  117. np.random.seed(42)
  118. x = np.random.randn(100).astype("float32")
  119. y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
  120. y_mge = F.hsigmoid(tensor(x)).numpy()
  121. np.testing.assert_equal(y_np, y_mge)
  122. def test_logical_oprs():
  123. x = np.array([[True, False], [False, True]])
  124. y = np.array([[True, True], [False, False]])
  125. xx = tensor(x)
  126. yy = tensor(y)
  127. np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
  128. np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
  129. np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
  130. np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())
  131. def test_qadd():
  132. inp_scale = 0.5
  133. outp_scale = 0.2
  134. x = np.arange(6).reshape(2, 3).astype("float32")
  135. y = np.arange(6).reshape(2, 3).astype("float32")
  136. x = tensor(x, dtype=dtype.qint8(inp_scale))
  137. y = tensor(y, dtype=dtype.qint8(inp_scale))
  138. result_mge = F.elemwise._elemwise_multi_type(
  139. x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
  140. )
  141. result_mge = result_mge.astype("float32").numpy()
  142. result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
  143. np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)
  144. def test_int32_input():
  145. x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
  146. for op_name in elemwise.__all__:
  147. op = getattr(elemwise, op_name)
  148. nargs = op.__code__.co_argcount
  149. if op_name == "clip":
  150. inp = (x, 0, 1)
  151. elif op_name.endswith("_shift"):
  152. inp = (x, 1)
  153. elif op_name.startswith("logical_"):
  154. continue
  155. else:
  156. inp = (x,) * nargs
  157. y = op(*inp)
  158. y.numpy()
@pytest.mark.parametrize("is_trace", [True, False])
def test_empty_tensor(is_trace):
    """Every elemwise op must handle empty tensors (0-sized shapes) and
    produce an output of the broadcast shape, both eagerly and under trace
    (symbolic and imperative)."""
    binary_func = []
    unary_func = []
    # Partition all public elemwise ops into unary/binary callables, wrapping
    # the ones that need special arguments or dtypes. `f=op` in each lambda
    # binds the current op at definition time (avoids late-binding capture).
    for op_name in elemwise.__all__:
        op = getattr(elemwise, op_name)
        nargs = op.__code__.co_argcount
        if op_name == "clip":
            unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
        elif op_name.endswith("_shift"):
            # shift ops require an integer tensor and a shift amount
            unary_func.append(
                [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
            )
        elif op_name.startswith("logical_"):  # logical_xxx op only accept boolean type
            if nargs == 1:
                unary_func.append(
                    [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
                )
            else:
                assert nargs == 2
                binary_func.append(
                    [
                        op_name,
                        lambda x, y, f=op: f(
                            tensor(x.numpy(), dtype="bool"),
                            tensor(y.numpy(), dtype="bool"),
                        ),
                    ]
                )
        elif nargs == 1:
            unary_func.append([op_name, op])
        elif nargs == 2:
            binary_func.append([op_name, op])
        else:
            raise NotImplementedError("nargs {}".format(nargs))

    def run_test(func, args, ref_shape, is_trace, sym=False):
        # Run `func` on float32 tensors and check the output shape; traced
        # functions are run three times to exercise trace record and replay.
        args = [tensor(t, dtype="float32") for t in args]
        if is_trace:
            func = trace(symbolic=sym)(func)
            for _ in range(3):
                out = func(*args)
                assert out.numpy().shape == ref_shape
        else:
            out = func(*args)
            assert out.numpy().shape == ref_shape, out.numpy().shape

    # Inputs: a 1-D empty array, a 3-D array with a zero-length axis, and a
    # plain scalar (to check empty-vs-scalar broadcasting).
    inps = [
        np.array([]).astype("float32"),
        np.random.randn(2, 0, 3).astype("float32"),
        123,
    ]
    for op_name, op in unary_func:
        if is_trace:
            for sym in [True, False]:
                run_test(op, [inps[0],], inps[0].shape, True, sym)
                run_test(op, [inps[1],], inps[1].shape, True, sym)
        else:
            run_test(op, [inps[0],], inps[0].shape, False)
            run_test(op, [inps[1],], inps[1].shape, False)
    for op_name, op in binary_func:
        # reference shapes come from numpy's own broadcasting of the inputs
        if is_trace:
            for sym in [True, False]:
                run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, True, sym)
                run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, True, sym)
                run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, True, sym)
                run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, True, sym)
        else:
            run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, False)
            run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, False)
            run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, False)
            run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, False)

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台