GitOrigin-RevId: 2bf8c42cfd
release-1.1
@@ -1071,13 +1071,12 @@ def interpolate(
 import numpy as np
 from megengine import tensor
 import megengine.functional as F
-from megengine.test import assertTensorClose
 x = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
 out = F.interpolate(x, [4, 4], align_corners=False)
 print(out.numpy())
 out2 = F.interpolate(x, scale_factor=2.)
-assertTensorClose(out.numpy(), out2.numpy())
+np.testing.assert_allclose(out.numpy(), out2.numpy())
 Outputs:
@@ -1,67 +0,0 @@
-# -*- coding: utf-8 -*-
-# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
-#
-# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-import numpy as np
-
-
-def assertTensorClose(
-    v0, v1, *, max_err: float = 1e-6, allow_special_values: bool = False, name=None
-):
-    """
-    :param allow_special_values: whether to allow :attr:`v0` and :attr:`v1` to contain inf and nan values.
-    :param max_err: relative error
-    """
-    __tracebackhide__ = True  # pylint: disable=unused-variable
-    assert (
-        v0.dtype == v1.dtype
-    ), "Two Tensor must have same dtype, but the inputs are {} and {}".format(
-        v0.dtype, v1.dtype
-    )
-    v0 = np.ascontiguousarray(v0, dtype=np.float32).copy()
-    v1 = np.ascontiguousarray(v1, dtype=np.float32).copy()
-    if allow_special_values:
-        # check nan and rm it
-        v0_nan_mask = np.isnan(v0)
-        if np.any(v0_nan_mask):
-            assert np.array_equiv(v0_nan_mask, np.isnan(v1)), (v0, v1)
-            v0[v0_nan_mask] = 0
-            v1[v0_nan_mask] = 0
-        # check inf and rm it
-        v0_inf_mask = v0 == float("inf")
-        if np.any(v0_inf_mask):
-            assert np.array_equiv(v0_inf_mask, v1 == float("inf")), (v0, v1)
-            v0[v0_inf_mask] = 0
-            v1[v0_inf_mask] = 0
-        # check -inf and rm it
-        v0_inf_mask = v0 == float("-inf")
-        if np.any(v0_inf_mask):
-            assert np.array_equiv(v0_inf_mask, v1 == float("-inf")), (v0, v1)
-            v0[v0_inf_mask] = 0
-            v1[v0_inf_mask] = 0
-    else:
-        assert np.isfinite(v0.sum()) and np.isfinite(v1.sum()), (v0, v1)
-    assert v0.shape == v1.shape, "Two tensor must have same shape({} v.s. {})".format(
-        v0.shape, v1.shape
-    )
-    vdiv = np.max([np.abs(v0), np.abs(v1), np.ones_like(v0)], axis=0)
-    err = np.abs(v0 - v1) / vdiv
-    check = err > max_err
-    if check.sum():
-        idx = tuple(i[0] for i in np.nonzero(check))
-        if name is None:
-            name = "tensor"
-        else:
-            name = "tensor {}".format(name)
-        raise AssertionError(
-            "{} not equal: "
-            "shape={} nonequal_idx={} v0={} v1={} err={}".format(
-                name, v0.shape, idx, v0[idx], v1[idx], err[idx]
-            )
-        )
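A note on semantics, since the two checks are not identical: the deleted helper measured a combined error, |v0 - v1| / max(|v0|, |v1|, 1), and compared it against max_err, whereas np.testing.assert_allclose checks |actual - desired| <= atol + rtol * |desired| (defaults rtol=1e-7, atol=0). Mapping max_err to atol, as most hunks below do, reproduces the old behaviour for magnitudes up to 1 but is stricter for large values. A minimal sketch of the two criteria (the values and tolerance are illustrative):

import numpy as np

max_err = 1e-6  # illustrative tolerance, matching the deleted default
v0 = np.array([0.5, 100.0], dtype=np.float32)
v1 = (v0 + 1e-7).astype(np.float32)

# Old criterion: error relative to max(|v0|, |v1|, 1).
vdiv = np.max([np.abs(v0), np.abs(v1), np.ones_like(v0)], axis=0)
assert (np.abs(v0 - v1) / vdiv <= max_err).all()

# New criterion: |actual - desired| <= atol + rtol * |desired|.
np.testing.assert_allclose(v1, v0, atol=max_err)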
@@ -14,7 +14,8 @@ def opr_test(cases, func, compare_fn=_default_compare_fn, ref_fn=None, **kwargs)
 and should have output if ref_fn is None.
 should use list for multiple inputs and outputs for each case.
 :param func: the function to run opr.
-:param compare_fn: the function to compare the result and expected, use assertTensorClose if None.
+:param compare_fn: the function to compare the result and expected, use
+    ``np.testing.assert_allclose`` if None.
 :param ref_fn: the function to generate expected data, should assign output if None.
 Examples:
@@ -24,7 +24,6 @@ from megengine.jit import SublinearMemoryConfig
 from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
 from megengine.optimizer import SGD
 from megengine.tensor import Tensor
-from megengine.test import assertTensorClose


 def get_gpu_name():
@@ -172,13 +171,13 @@ def run_train(
     loss = train_func(data, label, net, opt, gm)
     opt.step()
-    assertTensorClose(loss.numpy(), checkpoint["loss"], max_err=max_err)
+    np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
     for param, param_ref in zip(
         net.state_dict().items(), checkpoint["net_updated"].items()
     ):
         assert param[0] == param_ref[0]
-        assertTensorClose(param[1], param_ref[1], max_err=max_err)
+        np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)


 def run_eval(
@@ -209,7 +208,7 @@ def run_eval(
     for _ in range(3):
         new_value = eval_fun(data, net=net)
-        assertTensorClose(new_value.numpy(), refer_value.numpy(), max_err=max_err)
+        np.testing.assert_allclose(new_value.numpy(), refer_value.numpy(), atol=max_err)


 def test_correctness():
@@ -27,7 +27,6 @@ from megengine.functional.debug_param import set_conv_execution_strategy
 from megengine.module import AvgPool2d, BatchNorm2d, Conv2d, Linear, Module
 from megengine.optimizer import SGD
 from megengine.tensor import Tensor
-from megengine.test import assertTensorClose

 p_num = 4
@@ -181,7 +180,7 @@ def run_test(
     loss = train(data_train, label_train, net, opt, gm)
-    assertTensorClose(loss.numpy(), checkpoint["loss"], max_err=max_err)
+    np.testing.assert_allclose(loss.numpy(), checkpoint["loss"], atol=max_err)
     if dist.get_rank():
         return
@@ -189,7 +188,7 @@ def run_test(
         net.state_dict().items(), checkpoint["net_updated"].items()
     ):
         assert param[0] == param_ref[0]
-        assertTensorClose(param[1], param_ref[1], max_err=max_err)
+        np.testing.assert_allclose(param[1], param_ref[1], atol=max_err)

     procs = []
     for rank in range(p_num):
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 #
 # Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
@@ -11,7 +11,6 @@ from concurrent.futures import Future
 import numpy as np

 import megengine.functional as F
-from megengine.core._imperative_rt import DeviceTensorND
 from megengine.core.tensor import megbrain_graph as mgb_graph
 from megengine.core.tensor.raw_tensor import as_raw_tensor
@@ -10,34 +10,33 @@ import numpy as np
 import megengine.functional as F
 from megengine import tensor
-from megengine.test import assertTensorClose


 def test_abs():
-    assertTensorClose(
+    np.testing.assert_allclose(
         F.abs(tensor([-3.0, -4.0, -5.0])).numpy(),
         np.abs(np.array([-3.0, -4.0, -5.0], dtype=np.float32)),
     )
-    assertTensorClose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))
+    np.testing.assert_allclose(F.abs(-3.0).numpy(), np.abs(np.float32(-3.0)))


 def test_multiply():
-    assertTensorClose(
+    np.testing.assert_allclose(
         F.mul(-3.0, -4.0).numpy(), np.multiply(np.float32(-3.0), np.float32(-4.0))
     )
-    assertTensorClose(
+    np.testing.assert_allclose(
         F.mul(tensor([3.0, 4.0]), 4.0).numpy(),
         np.multiply(np.array([3.0, 4.0], dtype=np.float32), 4.0),
     )
-    assertTensorClose(
+    np.testing.assert_allclose(
         F.mul(4.0, tensor([3.0, 4.0])).numpy(),
         np.multiply(4.0, np.array([3.0, 4.0], dtype=np.float32)),
     )
-    assertTensorClose(
+    np.testing.assert_allclose(
         F.mul(tensor([3.0, 4.0]), tensor([3.0, 4.0])).numpy(),
         np.multiply(
             np.array([3.0, 4.0], dtype=np.float32),
@@ -51,24 +50,28 @@ def test_clamp():
     `F.clamp` will fall into wrong conditions unexpectedly.
     """
     x = np.linspace(-6, 6, dtype="float32")
-    assertTensorClose(F.clamp(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6))
-    assertTensorClose(F.clamp(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0))
+    np.testing.assert_allclose(
+        F.clamp(tensor(x) + 3, 0, 6).numpy(), np.clip(x + 3, 0, 6)
+    )
+    np.testing.assert_allclose(
+        F.clamp(tensor(x) - 3, -6, 0).numpy(), np.clip(x - 3, -6, 0)
+    )


 def test_isnan():
     for case in [[1, float("nan"), 0]]:
-        assertTensorClose(F.isnan(tensor(case)).numpy(), np.isnan(case))
+        np.testing.assert_allclose(F.isnan(tensor(case)).numpy(), np.isnan(case))


 def test_isinf():
     for case in [[1, float("inf"), 0]]:
-        assertTensorClose(F.isinf(tensor(case)).numpy(), np.isinf(case))
+        np.testing.assert_allclose(F.isinf(tensor(case)).numpy(), np.isinf(case))


 def test_sign():
     for case in [[1, -1, 0]]:
         x = tensor(case)
-        assertTensorClose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))
+        np.testing.assert_allclose(F.sign(x).numpy(), np.sign(case).astype(x.dtype))


 def test_cosh():
@@ -19,7 +19,6 @@ from megengine import Parameter, Tensor, is_cuda_available, tensor
 from megengine.core._trace_option import use_tensor_shape
 from megengine.core.autodiff.grad import Grad
 from megengine.core.tensor.utils import make_shape_tuple
-from megengine.test import assertTensorClose


 def test_where():
@@ -105,10 +104,10 @@ def test_interpolate():
         out = F.interpolate(inp, scale_factor=2.0, mode="LINEAR")
         out2 = F.interpolate(inp, 4, mode="LINEAR")
-        assertTensorClose(
+        np.testing.assert_allclose(
             out.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
         )
-        assertTensorClose(
+        np.testing.assert_allclose(
             out2.numpy(), np.array([[[1.0, 1.25, 1.75, 2.0]]], dtype=np.float32)
         )
@@ -118,7 +117,7 @@ def test_interpolate():
         out = F.interpolate(inp, [4, 4])
         out2 = F.interpolate(inp, scale_factor=2.0)
-        assertTensorClose(out.numpy(), out2.numpy())
+        np.testing.assert_allclose(out.numpy(), out2.numpy())

     def assign_corner_interpolate():
         inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
@@ -126,7 +125,7 @@ def test_interpolate():
         out = F.interpolate(inp, [4, 4], align_corners=True)
         out2 = F.interpolate(inp, scale_factor=2.0, align_corners=True)
-        assertTensorClose(out.numpy(), out2.numpy())
+        np.testing.assert_allclose(out.numpy(), out2.numpy())

     def error_shape_linear_interpolate():
         inp = tensor(np.arange(1, 5, dtype=np.float32).reshape(1, 1, 2, 2))
@@ -212,7 +211,7 @@ def test_one_hot():
         inp = tensor(np.arange(1, 4, dtype=np.int32))
         out = F.one_hot(inp, num_classes=4)
-        assertTensorClose(
+        np.testing.assert_allclose(
             out.numpy(), np.eye(4, dtype=np.int32)[np.arange(1, 4, dtype=np.int32)]
         )
@@ -225,7 +224,7 @@ def test_one_hot():
         inp = tensor(arr)
         out = F.one_hot(inp, 10)
-        assertTensorClose(out.numpy(), np.eye(10, dtype=np.int32)[arr])
+        np.testing.assert_allclose(out.numpy(), np.eye(10, dtype=np.int32)[arr])

     onehot_low_dimension()
     onehot_high_dimension()
@@ -237,16 +236,16 @@ def test_add_update():
     b = Tensor(v)

     u = F.add_update(b, 1)
-    assertTensorClose(u.numpy(), v + 1)
+    np.testing.assert_allclose(u.numpy(), v + 1, atol=1e-6)
     u = F.add_update(b, 1)
-    assertTensorClose(u.numpy(), v + 2)
+    np.testing.assert_allclose(u.numpy(), v + 2, atol=1e-6)

     x = np.ones((2, 2), dtype=np.float32)
     y = x * 0.5
     dest = tensor(x)
     delta = tensor(y)
     r = F.add_update(dest, delta, alpha=0.9, beta=0.1, bias=0.1)
-    assertTensorClose(r.numpy(), x * 0.9 + y * 0.1 + 0.1)
+    np.testing.assert_allclose(r.numpy(), x * 0.9 + y * 0.1 + 0.1, atol=1e-6)


 def test_add_update_params():
@@ -263,7 +262,7 @@ def test_add_update_params():
     F.add_update(y, z, beta=0.1)

     res = f(np.ones((2, 3)).astype(np.float32))
-    assertTensorClose(res.numpy(), b + 1)
+    np.testing.assert_allclose(res.numpy(), b + 1)


 def test_binary_cross_entropy():
@@ -276,7 +275,7 @@ def test_binary_cross_entropy():
         return 1 / (1 + np.exp(-x))

     def compare_fn(x, y):
-        assertTensorClose(x.numpy(), y, max_err=5e-4)
+        np.testing.assert_allclose(x.numpy(), y, atol=5e-4)

     np.random.seed(123)
     data1 = sigmoid(np.random.uniform(size=data1_shape).astype(np.float32))
@@ -444,7 +443,7 @@ def test_conv_bias():
         result = F.transpose(result, (0, 1, 4, 2, 3))
         expected = F.flatten(expected)
         result = F.flatten(result)
-        assertTensorClose(result.numpy(), expected.numpy(), max_err=outp_scale)
+        np.testing.assert_allclose(result.numpy(), expected.numpy(), atol=outp_scale)

     run(1, 4, 4, 24, 33, 1, 1, 2, 3, 1, 1, False)
     run(10, 12, 24, 46, 46, 1, 1, 2, 1, 3, 1, False)
@@ -13,7 +13,6 @@ from utils import opr_test
 import megengine.functional as F
 from megengine import tensor
-from megengine.test import assertTensorClose


 def common_test_reduce(opr, ref_opr):
@@ -18,14 +18,13 @@ from megengine import tensor
 from megengine.core._trace_option import use_tensor_shape
 from megengine.core.tensor.utils import astensor1d
 from megengine.distributed.helper import get_device_count_by_fork
-from megengine.test import assertTensorClose


 def test_eye():
     dtype = np.float32
     cases = [{"input": [10, 20]}, {"input": [20, 30]}]
     for case in cases:
-        assertTensorClose(
+        np.testing.assert_allclose(
             F.eye(case["input"], dtype=dtype).numpy(),
             np.eye(*case["input"]).astype(dtype),
         )
@@ -10,7 +10,6 @@ import numpy as np
 import megengine as mge
 from megengine.module import LeakyReLU
-from megengine.test import assertTensorClose


 def test_leaky_relu():
@@ -21,4 +20,4 @@ def test_leaky_relu():
     output = leaky_relu(mge.tensor(data))

     np_output = np.maximum(0, data) + negative_slope * np.minimum(0, data)
-    assertTensorClose(output.numpy(), np_output, max_err=0)
+    np.testing.assert_equal(output.numpy(), np_output)
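Where the old calls passed max_err=0, demanding bit-exact results, the replacement is np.testing.assert_equal rather than assert_allclose: allclose keeps its default rtol=1e-7 even when atol=0 is passed, so it cannot express exact equality unless both tolerances are zeroed. A minimal sketch of the equivalence (arrays are illustrative):

import numpy as np

a = np.array([1.0, 2.0], dtype=np.float32)
b = a.copy()

np.testing.assert_equal(a, b)                     # exact, elementwise
np.testing.assert_allclose(a, b, rtol=0, atol=0)  # equivalent spelling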
@@ -17,7 +17,6 @@ import megengine.distributed as dist
 from megengine import Tensor
 from megengine.core._trace_option import use_tensor_shape
 from megengine.module import BatchNorm1d, BatchNorm2d, SyncBatchNorm
-from megengine.test import assertTensorClose


 @pytest.mark.skipif(
@@ -47,9 +46,9 @@ def test_syncbn():
     for i in range(steps):
         yv = bn(Tensor(data[i]))

-    assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
-    assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
-    assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
+    np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6)
+    np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6)

     xv = []
     for i in range(steps):
@@ -119,12 +118,12 @@ def test_batchnorm():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
-        assertTensorClose(
-            running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
+        np.testing.assert_allclose(
+            bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1), atol=5e-6
         )
-        assertTensorClose(
-            running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
+        np.testing.assert_allclose(
+            bn.running_var.numpy().reshape(-1), running_var.reshape(-1), atol=5e-6
         )

     # test set 'training' flag to False
@@ -135,11 +134,11 @@ def test_batchnorm():
     data = Tensor(xv)
     yv1 = bn(data)
     yv2 = bn(data)
-    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
-    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
-    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
+    np.testing.assert_equal(yv1.numpy(), yv2.numpy())
+    np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
+    np.testing.assert_equal(var_backup, bn.running_var.numpy())
     yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
-    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6)


 @pytest.mark.skipif(
@@ -173,12 +172,12 @@ def test_syncbn1d():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
-        assertTensorClose(
-            running_mean.reshape(-1), bn.running_mean.numpy().reshape(-1), max_err=5e-6
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
+        np.testing.assert_allclose(
+            bn.running_mean.numpy().reshape(-1), running_mean.reshape(-1), atol=5e-6
         )
-        assertTensorClose(
-            running_var.reshape(-1), bn.running_var.numpy().reshape(-1), max_err=5e-6
+        np.testing.assert_allclose(
+            bn.running_var.numpy().reshape(-1), running_var.reshape(-1), atol=5e-6
         )

     # test set 'training' flag to False
@@ -189,11 +188,11 @@ def test_syncbn1d():
     data = Tensor(xv)
     yv1 = bn(data)
     yv2 = bn(data)
-    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
-    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
-    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
+    np.testing.assert_equal(yv1.numpy(), yv2.numpy())
+    np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
+    np.testing.assert_equal(var_backup, bn.running_var.numpy())
     yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
-    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6)


 def test_batchnorm2d():
@@ -221,9 +220,9 @@ def test_batchnorm2d():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
-        assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
-        assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
+        np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6)
+        np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6)

     # test set 'training' flag to False
     mean_backup = bn.running_mean.numpy()
@@ -233,11 +232,11 @@ def test_batchnorm2d():
     data = Tensor(xv)
     yv1 = bn(data)
     yv2 = bn(data)
-    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
-    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
-    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
+    np.testing.assert_equal(yv1.numpy(), yv2.numpy())
+    np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
+    np.testing.assert_equal(var_backup, bn.running_var.numpy())
     yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
-    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6)


 @pytest.mark.skipif(
@@ -272,9 +271,9 @@ def test_syncbn2d():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
-        assertTensorClose(running_mean, bn.running_mean.numpy(), max_err=5e-6)
-        assertTensorClose(running_var, bn.running_var.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
+        np.testing.assert_allclose(bn.running_mean.numpy(), running_mean, atol=5e-6)
+        np.testing.assert_allclose(bn.running_var.numpy(), running_var, atol=5e-6)

     # test set 'training' flag to False
     mean_backup = bn.running_mean.numpy()
@@ -284,11 +283,11 @@ def test_syncbn2d():
     data = Tensor(xv)
     yv1 = bn(data)
     yv2 = bn(data)
-    assertTensorClose(yv1.numpy(), yv2.numpy(), max_err=0)
-    assertTensorClose(mean_backup, bn.running_mean.numpy(), max_err=0)
-    assertTensorClose(var_backup, bn.running_var.numpy(), max_err=0)
+    np.testing.assert_equal(yv1.numpy(), yv2.numpy())
+    np.testing.assert_equal(mean_backup, bn.running_mean.numpy())
+    np.testing.assert_equal(var_backup, bn.running_var.numpy())
     yv_expect = (xv - running_mean) / np.sqrt(running_var + bn.eps)
-    assertTensorClose(yv_expect, yv1.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(yv1.numpy(), yv_expect, atol=5e-6)


 def test_batchnorm_no_stats():
@@ -311,7 +310,7 @@ def test_batchnorm_no_stats():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)


 @pytest.mark.skipif(
@@ -341,7 +340,7 @@ def test_syncbn_no_stats():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)


 def test_batchnorm2d_no_stats():
@@ -363,7 +362,7 @@ def test_batchnorm2d_no_stats():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)


 @pytest.mark.skipif(
@@ -392,4 +391,4 @@ def test_syncbn2d_no_stats():
         yv = bn(Tensor(xv))
         yv_expect = (xv - mean) / sd

-        assertTensorClose(yv_expect, yv.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(yv.numpy(), yv_expect, atol=5e-6)
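Beyond the rename, note the argument swap running through these batch-norm hunks: assertTensorClose(yv_expect, yv.numpy()) becomes assert_allclose(yv.numpy(), yv_expect). The old check was symmetric in its operands; numpy's signature is assert_allclose(actual, desired, ...), rtol scales with |desired|, and the failure report labels the operands, so the computed tensor belongs first. Illustrative only (the arrays are stand-ins):

import numpy as np

yv = np.zeros((2, 3), dtype=np.float32)         # stand-in for bn(Tensor(xv)).numpy()
yv_expect = np.zeros((2, 3), dtype=np.float32)  # stand-in for (xv - mean) / sd

np.testing.assert_allclose(yv, yv_expect, atol=5e-6)  # actual first, desired second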
@@ -12,7 +12,6 @@ import numpy as np
 from megengine import Parameter, tensor
 from megengine.module import ConvTranspose2d, LocalConv2d
-from megengine.test import assertTensorClose


 def test_conv_transpose2d():
@@ -49,7 +48,7 @@ def test_conv_transpose2d():
     conv_transpose2d.bias = Parameter(bias, dtype=np.float32)
     y = conv_transpose2d(tensor(inp))
-    assertTensorClose(out, y.numpy(), max_err=2e-6)
+    np.testing.assert_allclose(out, y.numpy(), atol=2e-6)


 def test_local_conv2d():
@@ -107,4 +106,4 @@ def test_local_conv2d():
                         * weights[0, oh, ow, :, :, :, oc]
                     )
-    assertTensorClose(outputs.numpy(), expected, max_err=1e-5)
+    np.testing.assert_allclose(outputs.numpy(), expected, atol=1e-5)
@@ -29,7 +29,6 @@ from megengine.module import (
     Softmax,
 )
 from megengine.quantization.quantize import quantize, quantize_qat
-from megengine.test import assertTensorClose


 class MLP(Module):
@@ -87,7 +86,7 @@ def graph_mode(*modes):
 def _default_compare_fn(x, y):
-    assertTensorClose(x.numpy(), y)
+    np.testing.assert_allclose(x.numpy(), y, rtol=1e-6)


 def opr_test(
@@ -102,7 +101,7 @@ def opr_test(
     mode: the list of test mode which are eager, static and dynamic_shape
           will test all the cases if None.
     func: the function to run opr.
-    compare_fn: the function to compare the result and expected, use assertTensorClose if None.
+    compare_fn: the function to compare the result and expected, use np.testing.assert_allclose if None.
     ref_fn: the function to generate expected data, should assign output if None.
     cases: the list which have dict element, the list length should be 2 for dynamic shape test.
            and the dict should have input,
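Call sites needing a different tolerance can still override compare_fn; a minimal sketch against the opr_test signature documented above (the operator, case data, and the 5e-4 tolerance are illustrative, not taken from the patch):

import numpy as np

def loose_compare_fn(x, y):
    # x is the computed megengine tensor, y the expected numpy array.
    np.testing.assert_allclose(x.numpy(), y, atol=5e-4)

# cases = [{"input": [...], "output": [...]}]
# opr_test(cases, some_op, compare_fn=loose_compare_fn)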
@@ -331,17 +330,17 @@ def test_module_api_hooks():
     bn1 = F.batch_norm2d(
         x + 3, mean1, Parameter(np.ones(shape), dtype=np.float32), training=True
     )
-    assertTensorClose(
+    np.testing.assert_allclose(
         net.i.bn.running_mean.numpy(), mean1.numpy(),
     )
     mean2 = Parameter(np.zeros(shape), dtype=np.float32)
     bn2 = F.batch_norm2d(
         bn1 + 3, mean2, Parameter(np.ones(shape), dtype=np.float32), training=True
     )
-    assertTensorClose(
+    np.testing.assert_allclose(
         net.bn.running_mean.numpy(), mean2.numpy(),
     )
-    assertTensorClose((bn2 + 2).numpy(), y.numpy())
+    np.testing.assert_allclose((bn2 + 2).numpy(), y.numpy())

     assert len(hooks) == 8
     for handler in hooks:
@@ -479,7 +478,7 @@ def test_state_dict():
     mlp1 = MLP()
     mlp1.load_state_dict(state_dict, strict=False)
     pred1 = mlp1(data)
-    assertTensorClose(pred0.numpy(), pred1.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(pred0.numpy(), pred1.numpy(), atol=5e-6)
     with pytest.raises(KeyError):
         mlp1.load_state_dict(state_dict)
     del state_dict["extra"]
@@ -520,13 +519,13 @@ def test_shared_param():
     net = Simple()
     assert net.conv0.weight is net.conv1.weight
     data = tensor(np.random.random((1, 1, 8, 8)).astype(np.float32))
-    assertTensorClose(net.conv0(data).numpy(), net.conv1(data).numpy())
+    np.testing.assert_allclose(net.conv0(data).numpy(), net.conv1(data).numpy())
     with BytesIO() as f:
         mge.save(net, f)
         f.seek(0)
         net1 = mge.load(f)
     assert net1.conv0.weight is net1.conv1.weight
-    assertTensorClose(net1.conv0(data).numpy(), net1.conv1(data).numpy())
+    np.testing.assert_allclose(net1.conv0(data).numpy(), net1.conv1(data).numpy())

     with BytesIO() as f:
         mge.save(net.conv0, f)
@@ -539,7 +538,7 @@ def test_shared_param():
         conv1 = mge.load(f)
     assert conv0.weight is not conv1.weight
-    assertTensorClose(conv0(data).numpy(), conv1(data).numpy())
+    np.testing.assert_allclose(conv0(data).numpy(), conv1(data).numpy())


 def test_pickle_module():
@@ -562,8 +561,8 @@ def test_pickle_module():
         mlp1 = mge.load(fout)
     pred2 = mlp1(data)

-    assertTensorClose(pred0.numpy(), pred1.numpy(), max_err=5e-6)
-    assertTensorClose(pred0.numpy(), pred2.numpy(), max_err=5e-6)
+    np.testing.assert_allclose(pred0.numpy(), pred1.numpy(), atol=5e-6)
+    np.testing.assert_allclose(pred0.numpy(), pred2.numpy(), atol=5e-6)


 @pytest.mark.skip(reason="under development")
@@ -609,8 +608,8 @@ def test_load_quantized():
     mlp.load_state_dict(checkpoint)
     pred1 = mlp(data)

-    assertTensorClose(
-        pred0.astype("float32").numpy(), pred1.astype("float32").numpy(), max_err=5e-6
+    np.testing.assert_allclose(
+        pred0.astype("float32").numpy(), pred1.astype("float32").numpy(), atol=5e-6
     )
@@ -15,7 +15,6 @@ import megengine as mge
 import megengine.functional as F
 from megengine import Parameter, Tensor
 from megengine.module import Conv2d
-from megengine.test import assertTensorClose


 def test_set_value():
@@ -23,21 +22,21 @@ def test_set_value():
     param = Parameter(v0)
     v1 = np.random.random((2, 3)).astype(np.float32)
     param.set_value(v1)
-    assertTensorClose(param.numpy(), v1, max_err=5e-6)
+    np.testing.assert_allclose(param.numpy(), v1, atol=5e-6)
     v2 = np.random.random((3, 3)).astype(np.float32)
     # TODO: add this
     # with pytest.raises(ValueError):
     #     param.set_value(v2)
-    assertTensorClose(param.numpy(), v1, max_err=5e-6)
+    np.testing.assert_allclose(param.numpy(), v1, atol=5e-6)


 @pytest.mark.skip(reason="fill unsupported")
 def test_fill():
     a = Tensor(np.zeros((2, 3), dtype=np.float32))
     a.fill(3)
-    assertTensorClose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
+    np.testing.assert_allclose(a.numpy(), np.full((2, 3), 3, dtype=np.float32))
     a.fill(124.568)
-    assertTensorClose(a.numpy(), np.full((2, 3), 124.568, dtype=np.float32))
+    np.testing.assert_allclose(a.numpy(), np.full((2, 3), 124.568, dtype=np.float32))


 # TODO: remove or rewrite following test
@@ -51,11 +50,11 @@ def test_fill():
 # f = compile(v, None)
 # out, = f()
-# assertTensorClose(out, p_ * 2)
+# np.testing.assert_allclose(out, p_ * 2)

 # F.add_update(p, p)
 # out, = f()
-# assertTensorClose(out, p_ * 4)
+# np.testing.assert_allclose(out, p_ * 4)


 # TODO: remove or rewrite following test
@@ -74,7 +73,7 @@ def test_fill():
 # data1 = Input("data", value=v)
 # out1 = net(data1)

-# assertTensorClose(out0, out1.numpy())
+# np.testing.assert_allclose(out0, out1.numpy())


 # def test_shape_warning():
@@ -12,7 +12,6 @@ from megengine.module import (
     QuantStub,
 )
 from megengine.quantization.quantize import disable_fake_quant, quantize_qat
-from megengine.test import assertTensorClose


 def test_qat_convbn2d():
@@ -31,22 +30,24 @@ def test_qat_convbn2d():
         # import pdb
         # pdb.set_trace()
         qat_outputs = qat_module(inputs)
-        assertTensorClose(normal_outputs.numpy(), qat_outputs.numpy(), max_err=5e-6)
-        assertTensorClose(
+        np.testing.assert_allclose(
+            normal_outputs.numpy(), qat_outputs.numpy(), atol=5e-6
+        )
+        np.testing.assert_allclose(
             module.bn.running_mean.numpy(),
             qat_module.bn.running_mean.numpy(),
-            max_err=5e-8,
+            atol=5e-8,
         )
-        assertTensorClose(
-            module.bn.running_var.numpy(),
-            qat_module.bn.running_var.numpy(),
-            max_err=5e-7,
+        np.testing.assert_allclose(
+            module.bn.running_var.numpy(), qat_module.bn.running_var.numpy(), atol=5e-7,
         )
         module.eval()
         normal_outputs = module(inputs)
         qat_module.eval()
        qat_outputs = qat_module(inputs)
-        assertTensorClose(normal_outputs.numpy(), qat_outputs.numpy(), max_err=5e-6)
+        np.testing.assert_allclose(
+            normal_outputs.numpy(), qat_outputs.numpy(), atol=5e-6
+        )


 def test_qat_conv():
@@ -82,10 +83,10 @@ def test_qat_conv():
     disable_fake_quant(qat_net)
     normal_outputs = net(inputs)
     qat_outputs = qat_net(inputs)
-    assertTensorClose(normal_outputs.numpy(), qat_outputs.numpy())
+    np.testing.assert_allclose(normal_outputs.numpy(), qat_outputs.numpy())
     net.eval()
     normal_outputs = net(inputs)
     qat_net.eval()
     qat_outputs = qat_net(inputs)
-    assertTensorClose(normal_outputs.numpy(), qat_outputs.numpy())
+    np.testing.assert_allclose(normal_outputs.numpy(), qat_outputs.numpy())
@@ -13,7 +13,6 @@ import megengine as mge
 from megengine import tensor
 from megengine.quantization.fake_quant import TQT_Function
 from megengine.quantization.internal_fake_quant import *
-from megengine.test import assertTensorClose


 class numpy_TQT_Function:
@@ -60,13 +59,13 @@ def test_TQT():
     nf = numpy_TQT_Function(-127, 127)

     def check_inp(a, b, c, a_np, b_np, c_np):
-        assertTensorClose(
-            f.forward(a, b).numpy(), nf.forward(a_np, b_np).astype("float32")
+        np.testing.assert_allclose(
+            f.forward(a, b).numpy(), nf.forward(a_np, b_np).astype("float32"), rtol=1e-6
         )
         c1, c2 = f.backward(c)
         c1_np, c2_np = nf.backward(c_np)
-        assertTensorClose(c1.numpy(), c1_np.astype("float32"))
-        assertTensorClose(c2.numpy(), c2_np.astype("float32"))
+        np.testing.assert_allclose(c1.numpy(), c1_np.astype("float32"), rtol=1e-6)
+        np.testing.assert_allclose(c2.numpy(), c2_np.astype("float32"), rtol=1e-6)

     a_np = np.random.random((4, 3)).astype("float32")
     b_np = np.random.random((1)).astype("float32")
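One last mapping worth spelling out: the deleted helper documented max_err as a relative error, so hunks with no obvious absolute scale (the default compare in _default_compare_fn, the TQT checks above) translate it to rtol, while checks on values of known small magnitude use atol. The two styles diverge for large magnitudes; a contrast with made-up values:

import numpy as np

a = np.array([1000.0], dtype=np.float32)
b = np.array([1000.0005], dtype=np.float32)

np.testing.assert_allclose(b, a, rtol=1e-6)  # passes: ~5e-4 <= 1e-6 * 1000
try:
    np.testing.assert_allclose(b, a, atol=1e-6)  # fails: ~5e-4 > 1e-6 + 1e-7 * 1000
except AssertionError:
    pass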