diff --git a/imperative/python/megengine/functional/quantized.py b/imperative/python/megengine/functional/quantized.py index bf6cc551..07a3b61a 100644 --- a/imperative/python/megengine/functional/quantized.py +++ b/imperative/python/megengine/functional/quantized.py @@ -24,7 +24,6 @@ def conv_bias_activation( padding: Union[int, Tuple[int, int]] = 0, dilation: Union[int, Tuple[int, int]] = 1, groups: int = 1, - format="NCHW", nonlinear_mode="IDENTITY", conv_mode="CROSS_CORRELATION", compute_mode="DEFAULT", @@ -69,7 +68,7 @@ def conv_bias_activation( dilate_h=dh, dilate_w=dw, dtype=dtype, - format=format, + format="NCHW", strategy=get_conv_execution_strategy(), nonlineMode=nonlinear_mode, mode=conv_mode, diff --git a/imperative/python/test/unit/functional/test_functional.py b/imperative/python/test/unit/functional/test_functional.py index 5cb7a4de..0778fb15 100644 --- a/imperative/python/test/unit/functional/test_functional.py +++ b/imperative/python/test/unit/functional/test_functional.py @@ -417,6 +417,7 @@ def test_batched_nms(): np.testing.assert_equal(results.numpy(), np.array([1, 4, 5], dtype=np.int32)) +@pytest.mark.skip(reason="cuda does not support nchw int8") def test_conv_bias(): inp_scale = 1.5 w_scale = 2.5