
test(mge/parampack): add parampack related opr test

GitOrigin-RevId: 550c6b6443
tags/v1.0.0-rc1
Megvii Engine Team 4 years ago
parent commit 60076f4715
3 changed files with 44 additions and 5 deletions
  1. +1  -1   imperative/python/megengine/functional/__init__.py
  2. +4  -4   imperative/python/megengine/functional/tensor.py
  3. +39 -0   imperative/python/test/unit/functional/test_tensor.py

+1 -1   imperative/python/megengine/functional/__init__.py

@@ -25,7 +25,7 @@ from .math import *
 from .nn import *
 from .quantized import conv_bias_activation
 from .tensor import *
-from .utils import accuracy, zero_grad
+from .utils import accuracy, copy, zero_grad
 
 
 # delete namespace
 # pylint: disable=undefined-variable


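The re-exported F.copy above copies a tensor onto a target device. A minimal sketch of how it can be called, assuming a CPU-only setup so both source and destination are "cpu0" (the arange payload is purely illustrative):

import numpy as np
import megengine.functional as F
from megengine import tensor

# Sketch only: copy x onto the destination device; copying onto the same
# "cpu0" device is assumed to be allowed here.
x = tensor(np.arange(6, dtype=np.float32), device="cpu0")
y = F.copy(x, "cpu0")
assert np.allclose(x.numpy(), y.numpy())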
+4 -4   imperative/python/megengine/functional/tensor.py

@@ -980,9 +980,9 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
     Returns concat Tensor, only used for parampack.
 
     :param inps: Input tensors
-    :param offsets: offsets of inputs, length of 2 * n,
+    :param offsets: device value of offsets
+    :param offsets_val: offsets of inputs, length of 2 * n,
         format [begin0, end0, begin1, end1].
-    :param offsets_val: device value of offsets
     :return: split tensors
 
     Examples:
@@ -995,8 +995,8 @@ def param_pack_concat(inps: List, offsets: Tensor, offsets_val: List) -> Tensor:
 
         a = tensor(np.ones((1,), np.int32))
         b = tensor(np.ones((3, 3), np.int32))
-        offsets = [0, 1, 1, 10]
-        offsets_val = tensor(offsets, np.int32)
+        offsets_val = [0, 1, 1, 10]
+        offsets = tensor(offsets_val, np.int32)
         c = F.param_pack_concat([a, b], offsets, offsets_val)
         print(c.numpy())




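The docstring above describes offsets_val as a flat list of length 2 * n in the layout [begin0, end0, begin1, end1]. A hedged sketch of how such a list could be derived from parameter shapes; the helper build_offsets is hypothetical and not part of the MegEngine API:

import numpy as np

# Hypothetical helper: build [begin0, end0, begin1, end1, ...] from shapes,
# assuming the packed buffer is laid out contiguously with no padding.
def build_offsets(shapes):
    offsets, begin = [], 0
    for shape in shapes:
        end = begin + int(np.prod(shape))
        offsets += [begin, end]
        begin = end
    return offsets

print(build_offsets([(1,), (3, 3)]))  # [0, 1, 1, 10]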
+39 -0  imperative/python/test/unit/functional/test_tensor.py

@@ -314,3 +314,42 @@ def test_device():
    y5 = F.full((3, 2), 4, device=x.device)
    y6 = F.full((3, 2), 4, device="xpux")
    np.testing.assert_almost_equal(y5.numpy(), y6.numpy())


def copy_test(dst, src):
    data = np.random.random((2, 3)).astype(np.float32)
    x = tensor(data, device=src)
    y = F.copy(x, dst)
    assert np.allclose(data, y.numpy())


@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
def test_copy_h2d():
    copy_test("cpu0", "gpu0")


@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
def test_copy_d2h():
    copy_test("gpu0", "cpu0")


@pytest.mark.skipif(not is_cuda_available(), reason="CUDA is disabled")
def test_copy_d2d():
    copy_test("gpu0", "gpu1")
    copy_test("gpu0:0", "gpu0:1")


def test_param_pack_split():
    a = tensor(np.ones((10,), np.int32))
    b, c = F.param_pack_split(a, [0, 1, 1, 10], [(1,), (3, 3)])
    assert np.allclose(b.numpy(), a.numpy()[1])
    assert np.allclose(c.numpy(), a.numpy()[1:].reshape(3, 3))


def test_param_pack_concat():
    a = tensor(np.ones((1,), np.int32))
    b = tensor(np.ones((3, 3), np.int32))
    offsets_val = [0, 1, 1, 10]
    offsets = tensor(offsets_val, np.int32)
    c = F.param_pack_concat([a, b], offsets, offsets_val)
    assert np.allclose(np.concatenate([a.numpy(), b.numpy().flatten()]), c.numpy())
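Taken together, the two parampack tests suggest a round trip: splitting a packed buffer and concatenating the pieces should reproduce it. A hedged sketch under the assumption that the offsets are contiguous (no padding between segments); the arange payload is illustrative:

import numpy as np
import megengine.functional as F
from megengine import tensor

# Sketch only: split a 10-element buffer into a (1,) and a (3, 3) piece,
# then concat them back; with contiguous offsets the result should equal
# the original buffer.
packed = tensor(np.arange(10, dtype=np.int32))
offsets_val = [0, 1, 1, 10]
b, c = F.param_pack_split(packed, offsets_val, [(1,), (3, 3)])
repacked = F.param_pack_concat([b, c], tensor(offsets_val, np.int32), offsets_val)
assert np.allclose(packed.numpy(), repacked.numpy())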
