
test_elemwise.py

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import numpy as np
import pytest

import megengine.functional as F
import megengine.functional.elemwise as elemwise
from megengine import tensor
from megengine.core.tensor import dtype
from megengine.functional.elemwise import Elemwise, _elwise
from megengine.jit import trace


def test_abs():
    np.testing.assert_allclose(
        F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
        np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
    )

    np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))


def test_elemwise_mode_string():
    for key, mode in vars(Elemwise.Mode).items():
        if isinstance(mode, Elemwise.Mode):
            assert key == mode
            assert Elemwise(mode=key) == Elemwise(mode=mode)


def test_multiply():
    np.testing.assert_allclose(
        F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
    )

    np.testing.assert_allclose(
        F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
        np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
    )

    np.testing.assert_allclose(
        F.mul(4.0, tensor([3.0, 4.0])).numpy(),
        np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
    )

    np.testing.assert_allclose(
        F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
        np.multiply(
            np.array([3.0, 4.0], dtype=np.float32),
            np.array([3.0, 4.0], dtype=np.float32),
        ),
    )


def test_clamp():
    """Fix an issue when `lower` or `upper` is 0, it will be recognized as `False` and
    `F.clip` will fall into wrong conditions unexpectedly.
    """
    x = np.linspace(-6, 6, dtype="float32")
    np.testing.assert_allclose(
        F.clip(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
    )
    np.testing.assert_allclose(
        F.clip(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
    )


def test_isnan():
    for case in [[1, float("nan"), 0]]:
        np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))


def test_isinf():
    for case in [[1, float("inf"), 0]]:
        np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))


def test_sign():
    for case in [[1, -1, 0]]:
        x = tensor(case)
        np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))


def test_cosh():
    np.random.seed(42)
    x = np.random.randn(100).astype("float32")
    y_np = np.cosh(x)
    y_mge = F.cosh(tensor(x)).numpy()
    np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)


def test_sinh():
    np.random.seed(42)
    x = np.random.randn(100).astype("float32")
    y_np = np.sinh(x)
    y_mge = F.sinh(tensor(x)).numpy()
    np.testing.assert_allclose(y_np, y_mge, rtol=1e-5)


def test_asinh():
    np.random.seed(42)
    x = np.random.randn(100).astype("float32")
    y_np = np.arcsinh(x)
    y_mge = F.asinh(tensor(x)).numpy()
    np.testing.assert_almost_equal(y_np, y_mge, decimal=5)


def test_acosh():
    x = np.arange(0, 10000).astype("float32") / 100 + 1
    y_np = np.arccosh(x)
    y_mge = F.acosh(tensor(x)).numpy()
    np.testing.assert_almost_equal(y_np, y_mge, decimal=6)


def test_atanh():
    np.random.seed(42)
    x = np.random.rand(100).astype("float32") * 2 - 1
    y_np = np.arctanh(x)
    y_mge = F.atanh(tensor(x)).numpy()
    np.testing.assert_almost_equal(y_np, y_mge, decimal=5)


def test_hswish():
    np.random.seed(42)
    x = np.random.randn(100).astype("float32")
    y_np = x * np.minimum(np.maximum(x + 3, 0), 6) / 6
    y_mge = F.hswish(tensor(x)).numpy()
    np.testing.assert_almost_equal(y_np, y_mge, decimal=6)


def test_hsigmoid():
    np.random.seed(42)
    x = np.random.randn(100).astype("float32")
    y_np = np.minimum(np.maximum(x + 3, 0), 6) / 6
    y_mge = F.hsigmoid(tensor(x)).numpy()
    np.testing.assert_equal(y_np, y_mge)


def test_logical_oprs():
    x = np.array([[True, False], [False, True]])
    y = np.array([[True, True], [False, False]])
    xx = tensor(x)
    yy = tensor(y)
    np.testing.assert_equal(~x, (F.logical_not(xx)).numpy())
    np.testing.assert_equal(x & y, F.logical_and(xx, yy).numpy())
    np.testing.assert_equal(x | y, F.logical_or(xx, yy).numpy())
    np.testing.assert_equal(x ^ y, F.logical_xor(xx, yy).numpy())


def test_qadd():
    inp_scale = 0.5
    outp_scale = 0.2
    x = np.arange(6).reshape(2, 3).astype("float32")
    y = np.arange(6).reshape(2, 3).astype("float32")
    x = tensor(x, dtype=dtype.qint8(inp_scale))
    y = tensor(y, dtype=dtype.qint8(inp_scale))
    result_mge = F.elemwise._elemwise_multi_type(
        x, y, mode="qadd", dtype=dtype.qint8(outp_scale)
    )
    result_mge = result_mge.astype("float32").numpy()
    result_expect = x.astype("float32").numpy() + y.astype("float32").numpy()
    np.testing.assert_almost_equal(result_mge, result_expect, decimal=6)


def test_int32_input():
    x = tensor(np.array([1, 2, 3, 4, 5]), dtype="int32")
    for op_name in elemwise.__all__:
        op = getattr(elemwise, op_name)
        nargs = op.__code__.co_argcount
        if op_name == "clip":
            inp = (x, 0, 1)
        elif op_name.endswith("_shift"):
            inp = (x, 1)
        elif op_name.startswith("logical_"):
            continue
        else:
            inp = (x,) * nargs
        y = op(*inp)
        y.numpy()


@pytest.mark.parametrize("is_trace", [True, False])
def test_empty_tensor(is_trace):
    binary_func = []
    unary_func = []
    for op_name in elemwise.__all__:
        op = getattr(elemwise, op_name)
        nargs = op.__code__.co_argcount
        if op_name == "clip":
            unary_func.append(["clip", lambda x, f=op: f(x, lower=0, upper=1)])
        elif op_name.endswith("_shift"):
            unary_func.append(
                [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="int32"), 1)]
            )
        elif op_name.startswith("logical_"):  # logical_xxx op only accept boolean type
            if nargs == 1:
                unary_func.append(
                    [op_name, lambda x, f=op: f(tensor(x.numpy(), dtype="bool"))]
                )
            else:
                assert nargs == 2
                binary_func.append(
                    [
                        op_name,
                        lambda x, y, f=op: f(
                            tensor(x.numpy(), dtype="bool"),
                            tensor(y.numpy(), dtype="bool"),
                        ),
                    ]
                )
        elif nargs == 1:
            unary_func.append([op_name, op])
        elif nargs == 2:
            binary_func.append([op_name, op])
        else:
            print(nargs)
            raise NotImplementedError

    def run_test(func, args, ref_shape, is_trace, sym=False):
        args = [tensor(t, dtype="float32") for t in args]
        if is_trace:
            func = trace(symbolic=sym)(func)
            for _ in range(3):
                out = func(*args)
                assert out.numpy().shape == ref_shape
        else:
            out = func(*args)
            assert out.numpy().shape == ref_shape
        print(out.numpy().shape)

    inps = [
        np.array([]).astype("float32"),
        np.random.randn(2, 0, 3).astype("float32"),
        123,
    ]

    for op_name, op in unary_func:
        if is_trace:
            for sym in [True, False]:
                run_test(op, [inps[0],], inps[0].shape, True, sym)
                run_test(op, [inps[1],], inps[1].shape, True, sym)
        else:
            run_test(op, [inps[0],], inps[0].shape, False)
            run_test(op, [inps[1],], inps[1].shape, False)

    for op_name, op in binary_func:
        if is_trace:
            for sym in [True, False]:
                run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, True, sym)
                run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, True, sym)
                run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, True, sym)
                run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, True, sym)
        else:
            run_test(op, [inps[0], inps[0]], (inps[0] + inps[0]).shape, False)
            run_test(op, [inps[1], inps[1]], (inps[1] + inps[1]).shape, False)
            run_test(op, [inps[0], inps[2]], (inps[0] + inps[2]).shape, False)
            run_test(op, [inps[1], inps[2]], (inps[1] + inps[2]).shape, False)

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has a GPU and that the driver is installed. If you would like to try deep learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
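As a quick sanity check before running the tests above on a GPU machine, a short snippet like the following can verify whether the bundled CUDA runtime actually sees a device. This is a minimal sketch, assuming `megengine.is_cuda_available()` and `megengine.set_default_device()` behave as in recent MegEngine releases:

import megengine

# Assumed public APIs: is_cuda_available() reports whether the bundled CUDA
# runtime found a usable GPU; set_default_device() picks where tensors live.
if megengine.is_cuda_available():
    megengine.set_default_device("gpu0")  # run subsequent ops on the first GPU
    print("CUDA device detected, running on GPU")
else:
    megengine.set_default_device("cpu0")  # fall back to CPU-only execution
    print("no CUDA device detected, running on CPU")

If the check reports no CUDA device on a machine that does have a GPU, the usual cause is a missing or mismatched NVIDIA driver rather than the MegEngine package itself.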