From 60076f47151e91f49159d53dcfa032151707ec5d Mon Sep 17 00:00:00 2001
From: Megvii Engine Team
Date: Sat, 29 Aug 2020 17:51:34 +0800
Subject: [PATCH] test(mge/parampack): add parampack related opr test

GitOrigin-RevId: 550c6b6443c36ff7afcbaeae1f4c2d5fdc29c7d3
---
 imperative/python/megengine/functional/__init__.py |  2 +-
 imperative/python/megengine/functional/tensor.py   |  8 ++---
 .../python/test/unit/functional/test_tensor.py     | 39 ++++++++++++++++++++++
 3 files changed, 44 insertions(+), 5 deletions(-)

diff --git a/imperative/python/megengine/functional/__init__.py b/imperative/python/megengine/functional/__init__.py
index 14fef9b3..cc999e2a 100644
--- a/imperative/python/megengine/functional/__init__.py
+++ b/imperative/python/megengine/functional/__init__.py
@@ -25,7 +25,7 @@ from .math import *
 from .nn import *
 from .quantized import conv_bias_activation
 from .tensor import *
-from .utils import accuracy, zero_grad
+from .utils import accuracy, copy, zero_grad
 
 # delete namespace
 # pylint: disable=undefined-variable
diff --git a/imperative/python/megengine/functional/tensor.py b/imperative/python/megengine/functional/tensor.py
index d0f53bf4..d1cd1110 100644
--- a/imperative/python/megengine/functional/tensor.py
+++ b/imperative/python/megengine/functional/tensor.py
@@ -980,9 +980,9 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
     Returns concat Tensor, only used for parampack.
 
     :param inps: Input tensors
-    :param offsets: offsets of inputs, length of 2 * n,
+    :param offsets: device value of offsets
+    :param offsets_val: offsets of inputs, length of 2 * n,
         format [begin0, end0, begin1, end1].
-    :param offsets_val: device value of offsets
     :return: split tensors
 
     Examples:
@@ -995,8 +995,8 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
 
         a = tensor(np.ones((1,), np.int32))
         b = tensor(np.ones((3, 3), np.int32))
-        offsets = [0, 1, 1, 10]
-        offsets_val = tensor(offsets, np.int32)
+        offsets_val = [0, 1, 1, 10]
+        offsets = tensor(offsets_val, np.int32)
         c = F.param_pack_concat([a, b], offsets, offsets_val)
         print(c.numpy())
 
diff --git a/imperative/python/test/unit/functional/test_tensor.py b/imperative/python/test/unit/functional/test_tensor.py
index e153b624..8d06d699 100644
--- a/imperative/python/test/unit/functional/test_tensor.py
+++ b/imperative/python/test/unit/functional/test_tensor.py
@@ -314,3 +314,42 @@ def test_device():
     y5 = F.full((3, 2), 4, device=x.device)
     y6 = F.full((3, 2), 4, device="xpux")
     np.testing.assert_almost_equal(y5.numpy(), y6.numpy())
+
+
+def copy_test(dst, src):
+    data = np.random.random((2, 3)).astype(np.float32)
+    x = tensor(data, device=src)
+    y = F.copy(x, dst)
+    assert np.allclose(data, y.numpy())
+
+
+@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
+def test_copy_h2d():
+    copy_test("cpu0", "gpu0")
+
+
+@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
+def test_copy_d2h():
+    copy_test("gpu0", "cpu0")
+
+
+@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
+def test_copy_d2d():
+    copy_test("gpu0", "gpu1")
+    copy_test("gpu0:0", "gpu0:1")
+
+
+def test_param_pack_split():
+    a = tensor(np.ones((10,), np.int32))
+    b, c = F.param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
+    assert np.allclose(b.numpy(), a.numpy()[1])
+    assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))
+
+
+def test_param_pack_concat():
+    a = tensor(np.ones((1,), np.int32))
+    b = tensor(np.ones((3, 3), np.int32))
+    offsets_val = [0, 1, 1, 10]
+    offsets = tensor(offsets_val, np.int32)
+    c = F.param_pack_concat([a, b], offsets, offsets_val)
+    assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())