From 38bd5999111e9a2bf5d3fd609b8469305fc52e82 Mon Sep 17 00:00:00 2001
From: Megvii Engine Team <megengine@megvii.com>
Date: Tue, 19 Jul 2022 11:23:33 +0800
Subject: [PATCH] fix(mgb): make error information of invalid MatMul more
 readable

GitOrigin-RevId: 96b922dd20d99616fcc1c252f30caffd07a49913
---
 dnn/src/common/batched_matrix_mul.cpp |  8 +++++++-
 dnn/src/common/matrix_mul.cpp         | 10 +++++++++-
 dnn/src/cuda/matrix_mul/naive.cpp     |  9 ++++++++-
 3 files changed, 24 insertions(+), 3 deletions(-)

diff --git a/dnn/src/common/batched_matrix_mul.cpp b/dnn/src/common/batched_matrix_mul.cpp
index c4946fb8..2a30660a 100644
--- a/dnn/src/common/batched_matrix_mul.cpp
+++ b/dnn/src/common/batched_matrix_mul.cpp
@@ -22,7 +22,13 @@ void BatchedMatrixMulForward::deduce_dtype(DType A, DType B, DType& C) {
     }
     megdnn_assert(
             C.valid() && (C == C_candi || C == C_candi2),
-            "unsupported BatchedMatMul(%s, %s) -> %s", A.name(), B.name(), C.name());
+            "runtime does not support BatchedMatMul(%s, %s) -> %s\n"
+            "now support case list: BatchedMatMul(FLOAT, FLOAT)\n"
+            "                       BatchedMatMul(Int8, Int8)\n"
+            "                       BatchedMatMul(QuantizedS8, QuantizedS8)\n"
+            "                       BatchedMatMul(Quantized8Asymm, Quantized8Asymm)\n"
+            "                       BatchedMatMul(Quantized4Asymm, Quantized4Asymm)\n",
+            A.name(), B.name(), C.name());
 }
 void BatchedMatrixMulForward::deduce_layout(
         const TensorLayout& A, const TensorLayout& B, TensorLayout& C) {
diff --git a/dnn/src/common/matrix_mul.cpp b/dnn/src/common/matrix_mul.cpp
index 494040a9..0b4d0415 100644
--- a/dnn/src/common/matrix_mul.cpp
+++ b/dnn/src/common/matrix_mul.cpp
@@ -31,7 +31,15 @@ void MatrixMulForward::deduce_dtype(DType A, DType B, DType& C) {
     }
     megdnn_assert(
             C.valid() && (C == C_candi || C == C_candi2),
-            "unsupported MatMul(%s, %s) -> %s", A.name(), B.name(), C.name());
+            "runtime does not support MatMul(%s, %s) -> %s\n"
+            "now support case list: MatMul(FLOAT, FLOAT)\n"
+            "                       MatMul(Int8, Int8)\n"
+            "                       MatMul(Int16, Int16)\n"
+            "                       MatMul(QuantizedS8, QuantizedS8)\n"
+            "                       MatMul(Quantized8Asymm, Quantized8Asymm)\n"
+            "                       MatMul(Quantized4Asymm, Quantized4Asymm)\n"
+            "                       MatMul(QuantizedS4, QuantizedS4)\n",
+            A.name(), B.name(), C.name());
 }
 
 void MatrixMulForward::deduce_layout(
diff --git a/dnn/src/cuda/matrix_mul/naive.cpp b/dnn/src/cuda/matrix_mul/naive.cpp
index 2a2fbacc..20848f8a 100644
--- a/dnn/src/cuda/matrix_mul/naive.cpp
+++ b/dnn/src/cuda/matrix_mul/naive.cpp
@@ -65,7 +65,14 @@ void MatrixMulForwardImpl::AlgoNaive::exec(const ExecArgs& args) const {
 #undef DISPATCH_CMODE
 #undef DISPATCH
     megdnn_throw(ssprintf(
-            "unsupported Matmul(%s, %s) -> %s with cmode = %d",
+            "runtime does not support MatMul(%s, %s) -> %s with cmode = %d\n"
+            "now support case list: MatMul(FLOAT, FLOAT)\n"
+            "                       MatMul(Int8, Int8)\n"
+            "                       MatMul(Int16, Int16)\n"
+            "                       MatMul(QuantizedS8, QuantizedS8)\n"
+            "                       MatMul(Quantized8Asymm, Quantized8Asymm)\n"
+            "                       MatMul(Quantized4Asymm, Quantized4Asymm)\n"
+            "                       MatMul(QuantizedS4, QuantizedS4)\n",
             args.layout_a.dtype.name(), args.layout_b.dtype.name(),
             args.layout_c.dtype.name(), static_cast<int>(param.compute_mode)));
 }