From 55974e8cf9729d975d5b01c648718d7c9efbf6e2 Mon Sep 17 00:00:00 2001 From: Megvii Engine Team Date: Fri, 19 Mar 2021 11:15:05 +0800 Subject: [PATCH] feat(log): opt log * optimize logging in release mode * add MGE_OVERRIDE_LOG_LEVEL for runtime debugging //! env var to configure LogLevel //! DEBUG = 0, INFO = 1, WARN = 2, ERROR = 3, NO_LOG = 4 //! for example, export MGE_OVERRIDE_LOG_LEVEL=0 sets LogLevel to DEBUG GitOrigin-RevId: 16cd674c56445b1f034359a2ac48beb7f86183a4 --- dnn/include/megdnn/dtype.h | 28 ++++----- .../warp_perspective/warp_perspective_cv.cpp | 3 +- dnn/src/arm_common/conv_bias/postprocess_helper.h | 8 ++- dnn/src/arm_common/elemwise/binary/algo.cpp | 2 +- dnn/src/arm_common/elemwise/binary/algo.h | 3 +- dnn/src/arm_common/elemwise/opr_impl.cpp | 2 +- dnn/src/arm_common/elemwise/opr_impl.h | 24 ++++---- dnn/src/arm_common/elemwise/ternary/algo.h | 3 +- dnn/src/arm_common/elemwise/unary/algo.h | 2 +- dnn/src/arm_common/reduce/opr_impl.cpp | 2 +- dnn/src/arm_common/resize/resize_cv.cpp | 2 +- dnn/src/arm_common/warp_affine/warp_affine_cv.cpp | 2 +- .../warp_perspective/warp_perspective_cv.cpp | 2 +- .../megcore/cambricon_computing_context.cpp | 2 +- dnn/src/common/algo_chooser.h | 15 +++-- dnn/src/common/basic_types.cpp | 36 +++++------- dnn/src/common/batched_matrix_mul.cpp | 10 ++-- dnn/src/common/concat_split.cpp | 4 +- dnn/src/common/convolution.cpp | 16 ++--- dnn/src/common/convolution3d.cpp | 28 ++++----- dnn/src/common/cv/helper.h | 6 +- dnn/src/common/deformable_conv.cpp | 19 +++--- dnn/src/common/dtype.cpp | 14 ++--- dnn/src/common/elemwise/kern_defs.cuh | 4 +- dnn/src/common/elemwise/opr_impl.cpp | 9 ++- dnn/src/common/elemwise_multi_type/opr_impl.cpp | 2 +- dnn/src/common/group_local.cpp | 15 +++-- dnn/src/common/handle.cpp | 7 ++- dnn/src/common/images2neibs.cpp | 16 ++--- dnn/src/common/indexing_one_hot.cpp | 9 +-- dnn/src/common/local/opr_impl.cpp | 18 +++--- dnn/src/common/local_share/opr_impl.cpp | 46 +++++++-------- dnn/src/common/matrix_inverse.cpp | 7 +-- dnn/src/common/matrix_mul.cpp | 14 ++--- dnn/src/common/megcore/cpu/api.cpp | 3 +- .../megcore/cpu/default_computing_context.cpp | 3 +- dnn/src/common/megcore/public_api/misc.cpp | 4 +- dnn/src/common/pooling.cpp | 21 +++---- dnn/src/common/relayout_format.cpp | 7 +++ dnn/src/common/remap.cpp | 8 +-- dnn/src/common/separableConv.cpp | 20 +++---- dnn/src/common/separableFilter.cpp | 13 ++--- dnn/src/common/utils.cpp | 12 ++-- dnn/src/common/utils.cuh | 64 +++++++++++--------- dnn/src/common/utils.h | 13 ++++- dnn/src/common/warp_affine.cpp | 32 +++++----- dnn/src/common/warp_perspective.cpp | 45 +++++++------- dnn/src/common/winograd/winograd_helper.cpp | 4 +- dnn/src/cuda/add_update/opr_impl.cpp | 4 +- dnn/src/cuda/batch_conv_bias/algo.cpp | 4 +- dnn/src/cuda/batch_conv_bias/opr_impl.cpp | 4 +- dnn/src/cuda/batch_normalization/opr_impl.cpp | 6 +- dnn/src/cuda/batched_matrix_mul/algo.cpp | 4 +- dnn/src/cuda/batched_matrix_mul/cublas_lt.cpp | 6 +- dnn/src/cuda/conv_bias/algo.cpp | 14 ++--- .../cuda/conv_bias/cudnn_conv_bias_activation.cpp | 6 +- dnn/src/cuda/conv_bias/helper.cpp | 14 ++--- dnn/src/cuda/convolution/backward_data/algo.cpp | 9 ++- dnn/src/cuda/convolution/backward_filter/algo.cpp | 23 ++++---- dnn/src/cuda/convolution/forward/algos.cpp | 8 +-- dnn/src/cuda/convolution3d/backward_data/algo.cpp | 26 ++++----- .../cuda/convolution3d/backward_filter/algo.cpp | 27 ++++----- dnn/src/cuda/convolution3d/forward/algo.cpp | 26 ++++----- dnn/src/cuda/cudnn_wrapper.cpp | 10 ++-- dnn/src/cuda/cumsum/kern_impl.cu | 2 
+- dnn/src/cuda/deformable_conv/opr_impl.cpp | 12 ++-- dnn/src/cuda/indexing_multi_axis_vec/opr_impl.cpp | 2 +- dnn/src/cuda/indexing_one_hot/opr_impl.cpp | 4 +- dnn/src/cuda/local_share/backward_data/algo.cpp | 4 +- dnn/src/cuda/local_share/backward_filter/algo.cpp | 4 +- dnn/src/cuda/local_share/forward/algo.cpp | 4 +- dnn/src/cuda/local_share/opr_impl.cpp | 12 ++-- dnn/src/cuda/matrix_mul/algos.cpp | 4 +- dnn/src/cuda/matrix_mul/cublasLt_wrapper.cpp | 12 ++-- dnn/src/cuda/matrix_mul/cublas_lt.cpp | 6 +- dnn/src/cuda/megcore/cuda_computing_context.cpp | 3 +- dnn/src/cuda/megcore/public_api/computing.cpp | 7 ++- dnn/src/cuda/relayout/opr_impl.cpp | 4 +- dnn/src/cuda/remap/backward_data.cu | 2 +- dnn/src/cuda/remap/backward_mat.cu | 2 +- dnn/src/cuda/remap/forward.cpp | 4 +- dnn/src/cuda/remap/forward.cu | 4 +- dnn/src/cuda/resize/forward.cpp | 3 +- dnn/src/cuda/warp_affine/opr_impl.cpp | 3 +- dnn/src/cuda/warp_perspective/forward.cpp | 5 +- dnn/src/fallback/batched_matrix_mul/algos.cpp | 6 +- dnn/src/fallback/convolution/algos.cpp | 6 +- dnn/src/fallback/convolution/opr_impl.cpp | 5 +- dnn/src/fallback/convolution/run_conv.cpp | 6 +- dnn/src/fallback/matrix_mul/algos.cpp | 2 +- dnn/src/fallback/matrix_mul/opr_impl.cpp | 5 +- dnn/src/fallback/powc/opr_impl.cpp | 4 +- dnn/src/fallback/resize/opr_impl.cpp | 2 +- dnn/src/fallback/warp_perspective/opr_impl.cpp | 2 +- dnn/src/naive/batch_normalization/opr_impl.cpp | 4 +- dnn/src/naive/convolution/convolution.cpp | 4 +- dnn/src/naive/convolution3d/convolution3d.cpp | 2 +- dnn/src/naive/group_local/opr_impl.cpp | 6 +- dnn/src/naive/indexing_multi_axis_vec/opr_impl.cpp | 2 +- dnn/src/naive/indexing_one_hot/opr_impl.cpp | 4 +- dnn/src/naive/local/local.cpp | 4 +- dnn/src/naive/pooling/opr_impl.cpp | 2 +- dnn/src/naive/reduce/opr_impl.cpp | 32 +++++----- dnn/src/naive/relayout_format/opr_impl.cpp | 2 +- dnn/src/naive/remap/opr_impl.cpp | 8 +-- dnn/src/naive/resize/opr_impl.cpp | 6 +- dnn/src/naive/resize/resize_cv.cpp | 2 +- dnn/src/naive/separable_filter/opr_impl.cpp | 2 +- dnn/src/naive/warp_affine/opr_impl.cpp | 2 +- dnn/src/naive/warp_affine/opr_impl.h | 2 +- dnn/src/naive/warp_affine/warp_affine_cv.cpp | 2 +- dnn/src/naive/warp_perspective/opr_impl.cpp | 12 ++-- dnn/src/naive/warp_perspective/opr_impl.h | 2 +- .../naive/warp_perspective/warp_perspective_cv.cpp | 3 +- dnn/src/rocm/add_update/opr_impl.cpp | 4 +- dnn/src/rocm/argmxx/opr_impl.cpp | 6 +- dnn/src/rocm/batch_normalization/opr_impl.cpp | 6 +- dnn/src/rocm/batched_matrix_mul/algos.cpp | 4 +- dnn/src/rocm/convolution/backward_data/algo.cpp | 4 +- dnn/src/rocm/convolution/backward_filter/algo.cpp | 4 +- dnn/src/rocm/convolution/forward/algo.cpp | 4 +- dnn/src/rocm/indexing_multi_axis_vec/opr_impl.cpp | 4 +- dnn/src/rocm/indexing_one_hot/opr_impl.cpp | 4 +- dnn/src/rocm/matrix_mul/algos.cpp | 4 +- dnn/src/rocm/miopen_wrapper.cpp | 8 +-- dnn/src/x86/conv_bias/f32/algos.cpp | 68 +++++++++++----------- dnn/src/x86/elemwise/opr_impl.cpp | 8 +-- dnn/src/x86/local/opr_impl.cpp | 4 +- dnn/src/x86/lrn/opr_impl.cpp | 2 +- dnn/src/x86/resize/resize_cv.cpp | 2 +- dnn/src/x86/separable_filter/opr_impl.cpp | 3 +- dnn/src/x86/utils.cpp | 2 +- dnn/src/x86/warp_affine/warp_affine_cv.cpp | 2 +- .../x86/warp_perspective/warp_perspective_cv.cpp | 2 +- dnn/test/common/elemwise.cpp | 2 +- dnn/test/common/topk.cpp | 4 +- dnn/test/naive/rng.cpp | 4 +- dnn/test/rocm/argmxx.cpp | 4 +- dnn/test/rocm/batched_matrix_mul.cpp | 2 +- dnn/test/rocm/eye.cpp | 2 +- dnn/test/rocm/matrix_mul.cpp | 10 ++-- 
dnn/test/rocm/pooling.cpp | 2 +- dnn/test/rocm/reduce.cpp | 4 +- dnn/test/rocm/type_cvt.cpp | 2 +- src/core/impl/common.cpp | 21 ++++++- src/core/impl/exception.cpp | 6 +- src/core/include/megbrain/common.h | 21 +++++-- src/core/include/megbrain/dtype.h | 4 +- src/jit/impl/mlir/ir/types.h | 18 +++--- src/opr/impl/dnn/convolution.cpp | 4 +- src/serialization/test/extern_c_opr.cpp | 2 +- 151 files changed, 657 insertions(+), 664 deletions(-) diff --git a/dnn/include/megdnn/dtype.h b/dnn/include/megdnn/dtype.h index 4ec1bc53..153c7772 100644 --- a/dnn/include/megdnn/dtype.h +++ b/dnn/include/megdnn/dtype.h @@ -25,13 +25,13 @@ #include "megdnn/internal/visibility_prologue.h" #if MEGDNN_DISABLE_FLOAT16 -#define MEGDNN_INC_FLOAT16(_x) -#define MEGDNN_FLOAT16_SELECT(_x, _y) _y +#define DNN_INC_FLOAT16(_x) +#define DNN_FLOAT16_SELECT(_x, _y) _y #else #include "megdnn/dtype/half.hpp" #include "megdnn/dtype/bfloat16.hpp" -#define MEGDNN_INC_FLOAT16(_x) _x -#define MEGDNN_FLOAT16_SELECT(_x, _y) _x +#define DNN_INC_FLOAT16(_x) _x +#define DNN_FLOAT16_SELECT(_x, _y) _x #endif namespace megdnn { @@ -49,8 +49,8 @@ namespace megdnn { cb(IntB2) \ cb(IntB4) \ cb(Byte) \ - MEGDNN_INC_FLOAT16(cb(Float16)) \ - MEGDNN_INC_FLOAT16(cb(BFloat16)) \ + DNN_INC_FLOAT16(cb(Float16)) \ + DNN_INC_FLOAT16(cb(BFloat16)) \ cb(UintB4) \ cb(Bool) \ cb(Uint16) \ @@ -65,8 +65,8 @@ namespace megdnn { cb(Int16) \ cb(Int32) \ cb(Byte) \ - MEGDNN_INC_FLOAT16(cb(Float16)) \ - MEGDNN_INC_FLOAT16(cb(BFloat16)) \ + DNN_INC_FLOAT16(cb(Float16)) \ + DNN_INC_FLOAT16(cb(BFloat16)) \ cb(Bool) \ cb(Uint16) \ @@ -108,8 +108,8 @@ namespace megdnn { #define MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb) \ cb(::megdnn::dtype::Float32) \ - MEGDNN_INC_FLOAT16(cb(::megdnn::dtype::Float16)) \ - MEGDNN_INC_FLOAT16(cb(::megdnn::dtype::BFloat16)) + DNN_INC_FLOAT16(cb(::megdnn::dtype::Float16)) \ + DNN_INC_FLOAT16(cb(::megdnn::dtype::BFloat16)) /*! 
@@ -360,8 +360,8 @@ typedef int8_t dt_int8; typedef uint8_t dt_uint8; typedef bool dt_bool; typedef uint16_t dt_uint16; -MEGDNN_INC_FLOAT16(typedef half_float::half dt_float16;) -MEGDNN_INC_FLOAT16(typedef half_bfloat16::bfloat16 dt_bfloat16;) +DNN_INC_FLOAT16(typedef half_float::half dt_float16;) +DNN_INC_FLOAT16(typedef half_bfloat16::bfloat16 dt_bfloat16;) #define MEGDNN_PARAMETERIZED_DTYPE_ENUM_BASE 100000 #if MEGDNN_CC_HOST @@ -722,10 +722,10 @@ MEGDNN_DEF_DT(Int8, dt_int8, INT, SIGNED, INT8_MIN, INT8_MAX); MEGDNN_DEF_DT(Uint8, dt_uint8, INT, UNSIGNED, 0, UINT8_MAX); MEGDNN_DEF_DT(Bool, dt_bool, BOOL, UNSIGNED, false, true); MEGDNN_DEF_DT(Uint16, dt_uint16, INT, UNSIGNED, 0, UINT16_MAX); -MEGDNN_INC_FLOAT16(MEGDNN_DEF_DT(Float16, dt_float16, FLOAT, SIGNED, +DNN_INC_FLOAT16(MEGDNN_DEF_DT(Float16, dt_float16, FLOAT, SIGNED, std::numeric_limits::lowest(), std::numeric_limits::max())); -MEGDNN_INC_FLOAT16(MEGDNN_DEF_DT(BFloat16, dt_bfloat16, FLOAT, SIGNED, +DNN_INC_FLOAT16(MEGDNN_DEF_DT(BFloat16, dt_bfloat16, FLOAT, SIGNED, std::numeric_limits::lowest(), std::numeric_limits::max())); diff --git a/dnn/src/aarch64/warp_perspective/warp_perspective_cv.cpp b/dnn/src/aarch64/warp_perspective/warp_perspective_cv.cpp index 3b7b1cda..743524ca 100644 --- a/dnn/src/aarch64/warp_perspective/warp_perspective_cv.cpp +++ b/dnn/src/aarch64/warp_perspective/warp_perspective_cv.cpp @@ -270,8 +270,7 @@ void megdnn::aarch64::warp_perspective_cv_exec( DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw( - megdnn_mangle("Unsupported datatype of WarpPerspective optr.")); + megdnn_throw("Unsupported datatype of WarpPerspective optr."); } } // vim: syntax=cpp.doxygen diff --git a/dnn/src/arm_common/conv_bias/postprocess_helper.h b/dnn/src/arm_common/conv_bias/postprocess_helper.h index df197742..0a9178a1 100644 --- a/dnn/src/arm_common/conv_bias/postprocess_helper.h +++ b/dnn/src/arm_common/conv_bias/postprocess_helper.h @@ -152,8 +152,9 @@ struct PostProcess { MEGDNN_MARK_USED_VAR(OH); MEGDNN_MARK_USED_VAR(OW); MEGDNN_MARK_USED_VAR(pack_oc_size); - megdnn_assert(bias_mode == megdnn::BiasMode::NO_BIAS && - nonlineMode == megdnn::NonlineMode::IDENTITY); + megdnn_throw_if(bias_mode != megdnn::BiasMode::NO_BIAS || + nonlineMode != megdnn::NonlineMode::IDENTITY, + megdnn_error, "bias_mode or nonlineMode not supported"); } }; @@ -310,7 +311,8 @@ struct PostProcess { megdnn::BiasMode bias_mode, megdnn::NonlineMode nonlineMode, megdnn::DType bias_type, megdnn::DType dst_type, size_t N, size_t OC, size_t OH, size_t OW, size_t pack_oc_size = 1) { - megdnn_assert(nonlineMode == megdnn::NonlineMode::IDENTITY); + megdnn_throw_if(nonlineMode != megdnn::NonlineMode::IDENTITY, + megdnn_error, "nonlineMode not supported"); FOR_BIAS(bias_mode, OH, OW); } }; diff --git a/dnn/src/arm_common/elemwise/binary/algo.cpp b/dnn/src/arm_common/elemwise/binary/algo.cpp index e804a304..833b7cd1 100644 --- a/dnn/src/arm_common/elemwise/binary/algo.cpp +++ b/dnn/src/arm_common/elemwise/binary/algo.cpp @@ -115,7 +115,7 @@ bool ElemwiseImpl::AlgoBinaryVecBcast101x4::is_available( auto& elparam = kern_param.binary_elparam; auto& src0 = elparam[0]; #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - if (MEGDNN_FLOAT16_SELECT(src0.layout.dtype == dtype::Float16{}, false)) { + if (DNN_FLOAT16_SELECT(src0.layout.dtype == dtype::Float16{}, false)) { return false; } #endif diff --git a/dnn/src/arm_common/elemwise/binary/algo.h b/dnn/src/arm_common/elemwise/binary/algo.h index bde96007..b98ee269 100644 ---
a/dnn/src/arm_common/elemwise/binary/algo.h +++ b/dnn/src/arm_common/elemwise/binary/algo.h @@ -23,8 +23,7 @@ namespace arm_common { } \ const char* name() const override { \ if (m_name.empty()) { \ - m_name = megdnn_mangle( \ - ssprintf("Elemwise::AlgoBinaryCase" #case)); \ + m_name = ssprintf("Elemwise::AlgoBinaryCase" #case); \ } \ return m_name.c_str(); \ } \ diff --git a/dnn/src/arm_common/elemwise/opr_impl.cpp b/dnn/src/arm_common/elemwise/opr_impl.cpp index a5577835..109eda68 100644 --- a/dnn/src/arm_common/elemwise/opr_impl.cpp +++ b/dnn/src/arm_common/elemwise/opr_impl.cpp @@ -66,7 +66,7 @@ void ElemwiseImpl::exec(const TensorNDArray& srcs, _megdnn_tensor_out dst) { } if (m_dst->layout.dtype == dtype::Float32() || - MEGDNN_FLOAT16_SELECT(m_dst->layout.dtype == dtype::Float16(), false) || + DNN_FLOAT16_SELECT(m_dst->layout.dtype == dtype::Float16(), false) || m_dst->layout.dtype == dtype::Int32() || m_dst->layout.dtype == dtype::Int16() || m_dst->layout.dtype == dtype::Int8()) { diff --git a/dnn/src/arm_common/elemwise/opr_impl.h b/dnn/src/arm_common/elemwise/opr_impl.h index 84c131cd..56824535 100644 --- a/dnn/src/arm_common/elemwise/opr_impl.h +++ b/dnn/src/arm_common/elemwise/opr_impl.h @@ -63,18 +63,18 @@ public: }; #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC -#define DISPATCH_TYPE(_case) \ - if (src0.layout.dtype == dtype::Float32{}) { \ - DISPATCH_MODE_FLOAT(_case, float, 0); \ - } else if (MEGDNN_FLOAT16_SELECT(src0.layout.dtype == dtype::Float16{}, \ - false)) { \ - DISPATCH_MODE_FLOAT(_case, __fp16, 1); \ - } else if (src0.layout.dtype == dtype::Int32{}) { \ - DISPATCH_MODE_INT(_case, int, 2); \ - } else if (src0.layout.dtype == dtype::Int16{}) { \ - DISPATCH_MODE_INT(_case, dt_int16, 3); \ - } else if (src0.layout.dtype == dtype::Int8{}) { \ - DISPATCH_MODE_INT(_case, dt_int8, 4); \ +#define DISPATCH_TYPE(_case) \ + if (src0.layout.dtype == dtype::Float32{}) { \ + DISPATCH_MODE_FLOAT(_case, float, 0); \ + } else if (DNN_FLOAT16_SELECT(src0.layout.dtype == dtype::Float16{}, \ + false)) { \ + DISPATCH_MODE_FLOAT(_case, __fp16, 1); \ + } else if (src0.layout.dtype == dtype::Int32{}) { \ + DISPATCH_MODE_INT(_case, int, 2); \ + } else if (src0.layout.dtype == dtype::Int16{}) { \ + DISPATCH_MODE_INT(_case, dt_int16, 3); \ + } else if (src0.layout.dtype == dtype::Int8{}) { \ + DISPATCH_MODE_INT(_case, dt_int8, 4); \ } #else #define DISPATCH_TYPE(_case) \ diff --git a/dnn/src/arm_common/elemwise/ternary/algo.h b/dnn/src/arm_common/elemwise/ternary/algo.h index 3748830c..d63b4ee8 100644 --- a/dnn/src/arm_common/elemwise/ternary/algo.h +++ b/dnn/src/arm_common/elemwise/ternary/algo.h @@ -23,8 +23,7 @@ namespace arm_common { } \ const char* name() const override { \ if (m_name.empty()) { \ - m_name = megdnn_mangle( \ - ssprintf("Elemwise::AlgoTernaryFma3" #case)); \ + m_name = ssprintf("Elemwise::AlgoTernaryFma3" #case); \ } \ return m_name.c_str(); \ } \ diff --git a/dnn/src/arm_common/elemwise/unary/algo.h b/dnn/src/arm_common/elemwise/unary/algo.h index 4be33cf2..8d31fbef 100644 --- a/dnn/src/arm_common/elemwise/unary/algo.h +++ b/dnn/src/arm_common/elemwise/unary/algo.h @@ -21,7 +21,7 @@ class ElemwiseImpl::AlgoUnary final : public ElemwiseImpl::AlgoBase { } const char* name() const override { if (m_name.empty()) { - m_name = megdnn_mangle(ssprintf("Elemwise::AlgoUnary")); + m_name = ssprintf("Elemwise::AlgoUnary"); } return m_name.c_str(); } diff --git a/dnn/src/arm_common/reduce/opr_impl.cpp b/dnn/src/arm_common/reduce/opr_impl.cpp index 5f128bab..adb92c98 100644 --- 
a/dnn/src/arm_common/reduce/opr_impl.cpp +++ b/dnn/src/arm_common/reduce/opr_impl.cpp @@ -916,7 +916,7 @@ void ReduceImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_out dst, } #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC if (src.layout.dtype.enumv() == DTypeEnum::Float16) { - MEGDNN_INC_FLOAT16(DISPATCH_MODE_FLOAT(__fp16, __fp16, __fp16)); + DNN_INC_FLOAT16(DISPATCH_MODE_FLOAT(__fp16, __fp16, __fp16)); } #endif } diff --git a/dnn/src/arm_common/resize/resize_cv.cpp b/dnn/src/arm_common/resize/resize_cv.cpp index ea05818d..cf2a4d1d 100644 --- a/dnn/src/arm_common/resize/resize_cv.cpp +++ b/dnn/src/arm_common/resize/resize_cv.cpp @@ -2044,7 +2044,7 @@ void megdnn::arm_common::resize_cv_exec( } MIDOUT_END(); } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of resize optr.")); + megdnn_throw("Unsupported datatype of resize optr."); } } } diff --git a/dnn/src/arm_common/warp_affine/warp_affine_cv.cpp b/dnn/src/arm_common/warp_affine/warp_affine_cv.cpp index 8f442f3e..b66767fe 100644 --- a/dnn/src/arm_common/warp_affine/warp_affine_cv.cpp +++ b/dnn/src/arm_common/warp_affine/warp_affine_cv.cpp @@ -285,7 +285,7 @@ void megdnn::arm_common::warp_affine_cv_exec( DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } diff --git a/dnn/src/arm_common/warp_perspective/warp_perspective_cv.cpp b/dnn/src/arm_common/warp_perspective/warp_perspective_cv.cpp index a67e25d2..17a9fd72 100644 --- a/dnn/src/arm_common/warp_perspective/warp_perspective_cv.cpp +++ b/dnn/src/arm_common/warp_perspective/warp_perspective_cv.cpp @@ -229,7 +229,7 @@ void megdnn::arm_common::warp_perspective_cv_exec( DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } diff --git a/dnn/src/cambricon/megcore/cambricon_computing_context.cpp b/dnn/src/cambricon/megcore/cambricon_computing_context.cpp index 77d49e64..2deb23e2 100644 --- a/dnn/src/cambricon/megcore/cambricon_computing_context.cpp +++ b/dnn/src/cambricon/megcore/cambricon_computing_context.cpp @@ -53,7 +53,7 @@ void CambriconComputingContext::memcpy(void* dst, const void* src, dir = CNRT_MEM_TRANS_DIR_DEV2DEV; break; default: - megdnn_throw(megdnn_mangle("bad cnrt mem trans dir")); + megdnn_throw("bad cnrt mem trans dir"); } if (kind == megcoreMemcpyDeviceToDevice) { cnrt_check(cnrtSyncQueue(context_.queue)); diff --git a/dnn/src/common/algo_chooser.h b/dnn/src/common/algo_chooser.h index da89c0ab..bec9aedc 100644 --- a/dnn/src/common/algo_chooser.h +++ b/dnn/src/common/algo_chooser.h @@ -120,16 +120,15 @@ typename Opr::Algorithm* get_reproducible_algo( MEGDNN_MARK_USED_VAR(name); if (available_but_limited_by_workspace) { - megdnn_throw(megdnn_mangle(ssprintf( + megdnn_throw(ssprintf( "no reproducible %s algorithm: %s workspace limit %zu is " "less than mini workspace limit %zu", name, args.to_string().c_str(), workspace_limit_in_bytes, - min_workspace_limit_in_bytes))); + min_workspace_limit_in_bytes)); } else if (available_but_not_reproducible) { - megdnn_throw( - megdnn_mangle(ssprintf("no reproducible %s algorithm", name))); + megdnn_throw(ssprintf("no reproducible %s algorithm", name)); } else { - megdnn_throw(megdnn_mangle(ssprintf("no usable %s algorithm", name))); + megdnn_throw(ssprintf("no usable %s algorithm", name)); } } @@ -154,13 +153,13 @@ typename Opr::Algorithm* 
get_usable_algo( MEGDNN_MARK_USED_VAR(name); if (available_but_limited_by_workspace) { - megdnn_throw(megdnn_mangle(ssprintf( + megdnn_throw(ssprintf( "no usable %s algorithm: %s workspace limit %zu is " "less than mini workspace limit %zu", name, args.to_string().c_str(), workspace_limit_in_bytes, - min_workspace_limit_in_bytes))); + min_workspace_limit_in_bytes)); } else { - megdnn_throw(megdnn_mangle(ssprintf("no usable %s algorithm", name))); + megdnn_throw(ssprintf("no usable %s algorithm", name)); } } diff --git a/dnn/src/common/basic_types.cpp b/dnn/src/common/basic_types.cpp index ac869af4..08117899 100644 --- a/dnn/src/common/basic_types.cpp +++ b/dnn/src/common/basic_types.cpp @@ -413,7 +413,7 @@ TensorLayout::Span TensorLayout::span() const { TensorLayout TensorLayout::broadcast(const TensorShape& tshape) const { megdnn_throw_if(!ndim || !tshape.ndim, tensor_reshape_error, - megdnn_mangle("broadcast involves empty tensor")); + "broadcast involves empty tensor"); if (is_scalar()) { TensorLayout result{dtype, format}; @@ -426,10 +426,9 @@ TensorLayout TensorLayout::broadcast(const TensorShape& tshape) const { } megdnn_throw_if(tshape.ndim < ndim, tensor_reshape_error, - megdnn_mangle(ssprintf( - "dimension for broadcast less than " - "dst_shape: src_shape=%s dst_shape=%s", - to_string().c_str(), tshape.to_string().c_str()))); + ssprintf("dimension for broadcast less than " + "dst_shape: src_shape=%s dst_shape=%s", + to_string().c_str(), tshape.to_string().c_str())); TensorLayout result{dtype, format}; for (size_t i = 0; i < tshape.ndim; ++i) { int target_idx = tshape.ndim - i - 1; @@ -439,10 +438,9 @@ TensorLayout TensorLayout::broadcast(const TensorShape& tshape) const { if (tshape.shape[target_idx] != cur_shape) { megdnn_throw_if( cur_shape != 1 && cur_stride != 0, tensor_reshape_error, - megdnn_mangle(ssprintf( - "broadcast on dim with shape not equal to 1: " - "src_shape=%s dst_shape=%s", - to_string().c_str(), tshape.to_string().c_str()))); + ssprintf("broadcast on dim with shape not equal to 1: " + "src_shape=%s dst_shape=%s", + to_string().c_str(), tshape.to_string().c_str())); result.shape[target_idx] = tshape.shape[target_idx]; result.stride[target_idx] = 0; } else { @@ -461,9 +459,9 @@ bool TensorLayout::try_reshape(TensorLayout& result, bool is_empty_shape = false; for (size_t i = 0; i < tshp.ndim; ++i) { if (!tshp.shape[i]) { - megdnn_throw_if(!format.is_default(), tensor_reshape_error, - megdnn_mangle(ssprintf("bad target tshp: %s", - tshp.to_string().c_str()))); + megdnn_throw_if( + !format.is_default(), tensor_reshape_error, + ssprintf("bad target tshp: %s", tshp.to_string().c_str())); is_empty_shape = true; break; } @@ -472,11 +470,10 @@ bool TensorLayout::try_reshape(TensorLayout& result, megdnn_throw_if( !tshp.ndim || total_nr_elems() != tshp.total_nr_elems(), tensor_reshape_error, - megdnn_mangle(ssprintf( - "number of elements do not match " - "in reshape: src=%s dest=%s", - static_cast(*this).to_string().c_str(), - tshp.to_string().c_str()))); + ssprintf("number of elements do not match " + "in reshape: src=%s dest=%s", + static_cast(*this).to_string().c_str(), + tshp.to_string().c_str())); auto cont = collapse_contiguous(); result.dtype = this->dtype; @@ -516,9 +513,8 @@ TensorLayout TensorLayout::reshape(const TensorShape& shape) const { TensorLayout ret; auto succ = try_reshape(ret, shape); megdnn_throw_if(!succ, tensor_reshape_error, - megdnn_mangle(ssprintf("can not reshape from %s to %s", - to_string().c_str(), - shape.to_string().c_str()))); + 
ssprintf("can not reshape from %s to %s", + to_string().c_str(), shape.to_string().c_str())); return ret; } diff --git a/dnn/src/common/batched_matrix_mul.cpp b/dnn/src/common/batched_matrix_mul.cpp index cc734ef1..e4d9c4cc 100644 --- a/dnn/src/common/batched_matrix_mul.cpp +++ b/dnn/src/common/batched_matrix_mul.cpp @@ -39,15 +39,15 @@ void BatchedMatrixMulForward::deduce_layout(const TensorLayout& A, TensorLayout& C) { auto errmsg = [&]() { std::string msg; - msg.append(megdnn_mangle("A=")); + msg.append("A="); msg.append(A.to_string()); - msg.append(megdnn_mangle(", B=")); + msg.append(", B="); msg.append(B.to_string()); - msg.append(megdnn_mangle(", C=")); + msg.append(", C="); msg.append(C.to_string()); - msg.append(megdnn_mangle(", transposeA=")); + msg.append(", transposeA="); msg.append(std::to_string(m_param.transposeA)); - msg.append(megdnn_mangle(", transposeB=")); + msg.append(", transposeB="); msg.append(std::to_string(m_param.transposeB)); return msg; }; diff --git a/dnn/src/common/concat_split.cpp b/dnn/src/common/concat_split.cpp index 713d6e28..77cc3700 100644 --- a/dnn/src/common/concat_split.cpp +++ b/dnn/src/common/concat_split.cpp @@ -41,8 +41,8 @@ void ConcatSplitBase::check_layout_common(const TensorLayoutArray &srcs, megdnn_assert_eq_size_t(src.ndim, ndim); } // ensure param().axis is correct - auto errmsg = megdnn_mangle("param().axis=") + - std::to_string(param().axis) + megdnn_mangle(", ndim=") + + auto errmsg = "param().axis=" + + std::to_string(param().axis) + ", ndim=" + std::to_string(ndim); MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert(param().axis < static_cast(ndim), "%s", diff --git a/dnn/src/common/convolution.cpp b/dnn/src/common/convolution.cpp index b23f4f83..ec7c70d6 100644 --- a/dnn/src/common/convolution.cpp +++ b/dnn/src/common/convolution.cpp @@ -23,17 +23,17 @@ std::string get_errmsg(const TensorLayout& src, const TensorLayout& filter, MEGDNN_MARK_USED_VAR(filter); MEGDNN_MARK_USED_VAR(dst); return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + ", " + - megdnn_layout_msg(dst) + ", " + megdnn_mangle("is_nchw=") + + megdnn_layout_msg(dst) + ", " + "is_nchw=" + std::to_string(param.format == param::Convolution::Format::NCHW) + - ", " + +megdnn_mangle("is_xcorr=") + + ", " + "is_xcorr=" + std::to_string( (param.mode == Convolution::Mode::CROSS_CORRELATION)) + - ", " + megdnn_mangle("pad_h=") + std::to_string(param.pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param.pad_w) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param.stride_h) + ", " + - megdnn_mangle("stride_w=") + std::to_string(param.stride_w) + ", " + - megdnn_mangle("dilate_h=") + std::to_string(param.dilate_h) + ", " + - megdnn_mangle("dilate_w=") + std::to_string(param.dilate_w); + ", " + "pad_h=" + std::to_string(param.pad_h) + ", " + + "pad_w=" + std::to_string(param.pad_w) + ", " + + "stride_h=" + std::to_string(param.stride_h) + ", " + + "stride_w=" + std::to_string(param.stride_w) + ", " + + "dilate_h=" + std::to_string(param.dilate_h) + ", " + + "dilate_w=" + std::to_string(param.dilate_w); } template diff --git a/dnn/src/common/convolution3d.cpp b/dnn/src/common/convolution3d.cpp index 223d69ad..98aee729 100644 --- a/dnn/src/common/convolution3d.cpp +++ b/dnn/src/common/convolution3d.cpp @@ -22,20 +22,20 @@ std::string get_errmsg(const TensorLayout& src, const TensorLayout& filter, MEGDNN_MARK_USED_VAR(filter); MEGDNN_MARK_USED_VAR(dst); return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + ", " + - megdnn_layout_msg(dst) + ", " + 
megdnn_mangle("is_ncdhw=") + + megdnn_layout_msg(dst) + ", " + "is_ncdhw=" + std::to_string(param.format == param::Convolution3D::Format::NCDHW) + - ", " + +megdnn_mangle("is_xcorr=") + + ", " + "is_xcorr=" + std::to_string( (param.mode == Convolution3D::Mode::CROSS_CORRELATION)) + - ", " + megdnn_mangle("pad_d=") + std::to_string(param.pad_d) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param.pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param.pad_w) + ", " + - megdnn_mangle("stride_d=") + std::to_string(param.stride_d) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param.stride_h) + ", " + - megdnn_mangle("stride_w=") + std::to_string(param.stride_w) + ", " + - megdnn_mangle("dilate_d=") + std::to_string(param.dilate_d) + ", " + - megdnn_mangle("dilate_h=") + std::to_string(param.dilate_h) + ", " + - megdnn_mangle("dilate_w=") + std::to_string(param.dilate_w); + ", " + "pad_d=" + std::to_string(param.pad_d) + ", " + + "pad_h=" + std::to_string(param.pad_h) + ", " + + "pad_w=" + std::to_string(param.pad_w) + ", " + + "stride_d=" + std::to_string(param.stride_d) + ", " + + "stride_h=" + std::to_string(param.stride_h) + ", " + + "stride_w=" + std::to_string(param.stride_w) + ", " + + "dilate_d=" + std::to_string(param.dilate_d) + ", " + + "dilate_h=" + std::to_string(param.dilate_h) + ", " + + "dilate_w=" + std::to_string(param.dilate_w); } } // namespace @@ -127,15 +127,15 @@ Convolution3DBase::CanonizedFilterMeta Convolution3DBase::deduce_layout_fwd( megdnn_assert(src.ndim >= 5_z, "%s", errmsg().c_str()); megdnn_assert(src.dtype == filter.dtype, "%s", errmsg().c_str()); if (param().data_type == Param::DataType::FLOAT) { - megdnn_assert(src.dtype == dtype::Float32() MEGDNN_INC_FLOAT16( + megdnn_assert(src.dtype == dtype::Float32() DNN_INC_FLOAT16( || src.dtype == dtype::Float16()), "invalid src dtype for conv: %s", src.dtype.name()); dst.dtype = src.dtype; } else { megdnn_assert(param().data_type == Param::DataType::FLOAT_IO16xC32); - MEGDNN_INC_FLOAT16(megdnn_assert(src.dtype == dtype::Float16(), + DNN_INC_FLOAT16(megdnn_assert(src.dtype == dtype::Float16(), "invalid src dtype for conv: %s", src.dtype.name())); - MEGDNN_INC_FLOAT16(dst.dtype = dtype::Float16()); + DNN_INC_FLOAT16(dst.dtype = dtype::Float16()); } auto img_dim = src.ndim - 2; megdnn_assert(img_dim == 3, "this is the convolution for 3D image"); diff --git a/dnn/src/common/cv/helper.h b/dnn/src/common/cv/helper.h index d4f7ca13..db689bb7 100644 --- a/dnn/src/common/cv/helper.h +++ b/dnn/src/common/cv/helper.h @@ -77,9 +77,9 @@ #include #endif -#define MegCVException(expr) \ - do { \ - megdnn_throw(megdnn_mangle(#expr)); \ +#define MegCVException(expr) \ + do { \ + megdnn_throw(#expr); \ } while (0) namespace megdnn { diff --git a/dnn/src/common/deformable_conv.cpp b/dnn/src/common/deformable_conv.cpp index 87052b1c..5328bda0 100644 --- a/dnn/src/common/deformable_conv.cpp +++ b/dnn/src/common/deformable_conv.cpp @@ -27,16 +27,15 @@ std::string get_errmsg(const TensorLayout& src, const TensorLayout& filter, MEGDNN_MARK_USED_VAR(dst); return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + ", " + megdnn_layout_msg(offset) + ", " + megdnn_layout_msg(mask) + ", " + - megdnn_layout_msg(dst) + ", " + megdnn_mangle("only support nchw") + - ", " + megdnn_mangle("group=") + std::to_string(param.group) + ", " + - megdnn_mangle("deformable_group=") + - std::to_string(param.deformable_group) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param.pad_h) + ", " + - megdnn_mangle("pad_w=") + 
std::to_string(param.pad_w) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param.stride_h) + ", " + - megdnn_mangle("stride_w=") + std::to_string(param.stride_w) + ", " + - megdnn_mangle("dilate_h=") + std::to_string(param.dilate_h) + ", " + - megdnn_mangle("dilate_w=") + std::to_string(param.dilate_w); + megdnn_layout_msg(dst) + ", " + "only support nchw" + ", " + + "group=" + std::to_string(param.group) + ", " + + "deformable_group=" + std::to_string(param.deformable_group) + ", " + + "pad_h=" + std::to_string(param.pad_h) + ", " + + "pad_w=" + std::to_string(param.pad_w) + ", " + + "stride_h=" + std::to_string(param.stride_h) + ", " + + "stride_w=" + std::to_string(param.stride_w) + ", " + + "dilate_h=" + std::to_string(param.dilate_h) + ", " + + "dilate_w=" + std::to_string(param.dilate_w); } template diff --git a/dnn/src/common/dtype.cpp b/dnn/src/common/dtype.cpp index a82d4a6b..3e490fd0 100644 --- a/dnn/src/common/dtype.cpp +++ b/dnn/src/common/dtype.cpp @@ -42,15 +42,13 @@ MEGDNN_FOREACH_PARAMETERIZED_DTYPE(TEMPLATED_IMPL) #undef IMPL void DType::on_assert_is_failed(const char *rname) const { - megdnn_throw(megdnn_mangle( - ssprintf("attempt to access dtype %s as %s", - name(), rname).c_str())); + megdnn_throw(ssprintf("attempt to access dtype %s as %s", name(), rname) + .c_str()); MEGDNN_MARK_USED_VAR(rname); } void DType::on_request_lowbit_size() const { - megdnn_throw(megdnn_mangle( - ssprintf("attempt to get size of lowbit dtype %s", name()))); + megdnn_throw(ssprintf("attempt to get size of lowbit dtype %s", name())); } DType DType::from_enum(DTypeEnum ev) { @@ -60,11 +58,11 @@ DType DType::from_enum(DTypeEnum ev) { #undef cb #define cb(_dt) case DTypeEnum::_dt: MEGDNN_FOREACH_PARAMETERIZED_DTYPE(cb) - megdnn_throw(megdnn_mangle( - "cannot construct parameterized DType via DType::from_enum")); + megdnn_throw( + "cannot construct parameterized DType via DType::from_enum"); #undef cb } - megdnn_throw(megdnn_mangle("bad DTypeEnum value")); + megdnn_throw("bad DTypeEnum value"); } template diff --git a/dnn/src/common/elemwise/kern_defs.cuh b/dnn/src/common/elemwise/kern_defs.cuh index 5c4f859a..7b81bf2b 100644 --- a/dnn/src/common/elemwise/kern_defs.cuh +++ b/dnn/src/common/elemwise/kern_defs.cuh @@ -87,8 +87,8 @@ namespace megdnn { //! define kernel for all float types #define DEF_KERN_FLOAT(_mode, _imp) \ DEF_KERN(dt_float32, _mode, _imp); \ - MEGDNN_INC_FLOAT16(DEF_KERN(dt_float16, _mode, _imp);) \ - MEGDNN_INC_FLOAT16(DEF_KERN(dt_bfloat16, _mode, _imp);) + DNN_INC_FLOAT16(DEF_KERN(dt_float16, _mode, _imp);) \ + DNN_INC_FLOAT16(DEF_KERN(dt_bfloat16, _mode, _imp);) //! 
define kernel for all int types #define DEF_KERN_INT(_mode, _imp) \ diff --git a/dnn/src/common/elemwise/opr_impl.cpp b/dnn/src/common/elemwise/opr_impl.cpp index 7370593e..52c01490 100644 --- a/dnn/src/common/elemwise/opr_impl.cpp +++ b/dnn/src/common/elemwise/opr_impl.cpp @@ -85,7 +85,7 @@ const ModeTrait& ModeTrait::from_mode(Mode mode) { MIDOUT_BEGIN(megdnn_common_elemwise, midout_iv(Mode::_m)) { \ auto&& t = get(Mode::_m); \ t.arity = _a; \ - t.name = megdnn_mangle(#_m); \ + t.name = (#_m); \ } \ MIDOUT_END(); #define _a 1 @@ -111,7 +111,7 @@ const ModeTrait& ModeTrait::from_mode(Mode mode) { t.allow_float = true; \ t.allow_bool = true; \ t.arity = _arity; \ - t.name = megdnn_mangle(#_m); \ + t.name = (#_m); \ } \ MIDOUT_END(); FUSE(FUSE_MUL_ADD3, 3); @@ -159,14 +159,13 @@ const ModeTrait& ModeTrait::from_mode(Mode mode) { void ElemwiseForward::deduce_shape(const TensorShapeArray& src, TensorShape& dst) { auto err = [&]() { - std::string msg( - megdnn_mangle("bad input shape for polyadic operator: ")); + std::string msg("bad input shape for polyadic operator: "); bool first = true; for (auto&& i : src) { if (first) first = false; else - msg.append(megdnn_mangle(", ")); + msg.append(", "); msg.append(i.to_string()); } megdnn_throw(msg); diff --git a/dnn/src/common/elemwise_multi_type/opr_impl.cpp b/dnn/src/common/elemwise_multi_type/opr_impl.cpp index ef634e1b..ef1ec392 100644 --- a/dnn/src/common/elemwise_multi_type/opr_impl.cpp +++ b/dnn/src/common/elemwise_multi_type/opr_impl.cpp @@ -158,7 +158,7 @@ const ModeTrait& ModeTrait::from_mode(Mode mode) { #define SET(f, m) \ MIDOUT_BEGIN(megdnn_common_elemwise_multi_type, midout_iv(Mode::m)) { \ - f(traits[static_cast(Mode::m)], megdnn_mangle(#m)); \ + f(traits[static_cast(Mode::m)], (#m)); \ } \ MIDOUT_END(); SET(init_fma3_int16x32x32x32, FUSE_MUL_ADD3_INT16x32x32x32); diff --git a/dnn/src/common/group_local.cpp b/dnn/src/common/group_local.cpp index ffcb3923..72485aef 100644 --- a/dnn/src/common/group_local.cpp +++ b/dnn/src/common/group_local.cpp @@ -19,13 +19,12 @@ void GroupLocalBase::deduce_layout_fwd(const TensorLayout &src, TensorLayout &dst) { auto errmsg = [&]() { - return megdnn_layout_msg(src) + ", " - + megdnn_layout_msg(filter) + ", " - + megdnn_layout_msg(dst) + ", " - + megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " - + megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " - + megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + ", " - + megdnn_mangle("stride_w=") + std::to_string(param().stride_w); + return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + + ", " + megdnn_layout_msg(dst) + ", " + + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w); }; MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert_contiguous(src); @@ -66,7 +65,7 @@ void GroupLocalBase::check_layout_fwd(const TensorLayout &src, megdnn_assert_eq_dtype(src, dst); deduce_layout_fwd(src, filter, dst_expected); megdnn_assert_eq_layout(dst_expected, dst); - megdnn_assert(src.dtype == dtype::Float32() || MEGDNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), true)); + megdnn_assert(src.dtype == dtype::Float32() || DNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), true)); } void GroupLocalForward::check_exec(const TensorLayout &src, diff --git a/dnn/src/common/handle.cpp b/dnn/src/common/handle.cpp index 08264766..44fb13ba 100644 --- a/dnn/src/common/handle.cpp +++ 
b/dnn/src/common/handle.cpp @@ -87,7 +87,7 @@ std::unique_ptr Handle::make(megcoreComputingHandle_t computing_handle, } else if (debug_level == 2) { return make_unique(computing_handle); } else { - megdnn_throw(megdnn_mangle("Debug level must be 0/1/2.")); + megdnn_throw("Debug level must be 0/1/2."); } } MIDOUT_END(); @@ -116,7 +116,8 @@ std::unique_ptr Handle::make(megcoreComputingHandle_t computing_handle, } else { // CUDA - megdnn_assert_internal(platform == megcorePlatformCUDA); + megdnn_throw_if(platform != megcorePlatformCUDA, megdnn_error, + "platform should be CUDA Platform"); #if MEGDNN_WITH_CUDA return make_unique(computing_handle); #else @@ -216,7 +217,7 @@ std::unique_ptr Handle::make(megcoreComputingHandle_t computing_handle, CASE(CAMBRICON, cambricon); #endif default: - megdnn_throw(megdnn_mangle("bad handle type")); + megdnn_throw("bad handle type"); } #undef CASE } diff --git a/dnn/src/common/images2neibs.cpp b/dnn/src/common/images2neibs.cpp index 23670942..5345f9e7 100644 --- a/dnn/src/common/images2neibs.cpp +++ b/dnn/src/common/images2neibs.cpp @@ -19,16 +19,12 @@ void Images2NeibsBase::deduce_layout_fwd(const TensorLayout &src, { auto errmsg = [&]() { return megdnn_layout_msg(src) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " + - megdnn_mangle("stride_h=") + - std::to_string(param().stride_h) + ", " + - megdnn_mangle("stride_w=") + - std::to_string(param().stride_w) + ", " + - megdnn_mangle("window_h=") + - std::to_string(param().window_h) + ", " + - megdnn_mangle("window_w=") + - std::to_string(param().window_w); + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w) + ", " + + "window_h=" + std::to_string(param().window_h) + ", " + + "window_w=" + std::to_string(param().window_w); }; MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert_contiguous(src); diff --git a/dnn/src/common/indexing_one_hot.cpp b/dnn/src/common/indexing_one_hot.cpp index bb7f3572..6e7ee89f 100644 --- a/dnn/src/common/indexing_one_hot.cpp +++ b/dnn/src/common/indexing_one_hot.cpp @@ -32,10 +32,11 @@ void IndexingOneHotBase::check_layout_fwd( const TensorLayout &src, const TensorLayout &index, const TensorLayout &dst) { auto errmsg = [&]() -> std::string { - return megdnn_mangle(ssprintf("bad layout for IndexingOneHot: " - "src=%s index=%s dst=%s axis=%d", - src.to_string().c_str(), index.to_string().c_str(), - dst.to_string().c_str(), m_param.axis)); + return ssprintf( + "bad layout for IndexingOneHot: " + "src=%s index=%s dst=%s axis=%d", + src.to_string().c_str(), index.to_string().c_str(), + dst.to_string().c_str(), m_param.axis); }; MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert_eq_dtype(src, dst); diff --git a/dnn/src/common/local/opr_impl.cpp b/dnn/src/common/local/opr_impl.cpp index 579e7254..bad17e03 100644 --- a/dnn/src/common/local/opr_impl.cpp +++ b/dnn/src/common/local/opr_impl.cpp @@ -17,15 +17,13 @@ namespace megdnn { void LocalBase::deduce_layout_fwd(const TensorLayout &src, const TensorLayout &filter, TensorLayout &dst) { - auto errmsg = megdnn_layout_msg(src) + ", " - + megdnn_layout_msg(filter) + ", " - + megdnn_layout_msg(dst) + ", " - + megdnn_mangle("is_xcorr=") - + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + ", " - + megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " - + megdnn_mangle("pad_w=") + 
std::to_string(param().pad_w) + ", " - + megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + ", " - + megdnn_mangle("stride_w=") + std::to_string(param().stride_w) ; + auto errmsg = megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + + ", " + megdnn_layout_msg(dst) + ", " + "is_xcorr=" + + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + + ", " + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w); auto errmsg_c = errmsg.c_str(); MEGDNN_MARK_USED_VAR(errmsg_c); @@ -77,7 +75,7 @@ void LocalBase::check_layout_fwd(const TensorLayout &src, megdnn_assert(src.dtype == filter.dtype && src.dtype == dst.dtype); megdnn_assert(src.dtype == dtype::Float32() || - MEGDNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), true)); + DNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), true)); } void LocalForward::deduce_layout(const TensorLayout &src, diff --git a/dnn/src/common/local_share/opr_impl.cpp b/dnn/src/common/local_share/opr_impl.cpp index 8616ba78..67185d12 100644 --- a/dnn/src/common/local_share/opr_impl.cpp +++ b/dnn/src/common/local_share/opr_impl.cpp @@ -19,20 +19,17 @@ void LocalShareBase::deduce_layout_fwd(const TensorLayout& src, using Mode = LocalShare::Param::Mode; auto errmsg = megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter) + ", " + - megdnn_layout_msg(dst) + ", " + megdnn_mangle("is_xcorr=") + + megdnn_layout_msg(dst) + ", " + "is_xcorr=" + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + - ", " + megdnn_mangle("stride_w=") + - std::to_string(param().stride_w) + ", " + - megdnn_mangle("dilate_h=") + std::to_string(param().dilate_h) + - ", " + megdnn_mangle("dilate_w=") + - std::to_string(param().dilate_w) + ", " + - megdnn_mangle("spatial_groups_h=") + - std::to_string(param().spatial_groups_h) + ", " + - megdnn_mangle("spatial_groups_w=") + - std::to_string(param().spatial_groups_w); + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w) + ", " + + "dilate_h=" + std::to_string(param().dilate_h) + ", " + + "dilate_w=" + std::to_string(param().dilate_w) + ", " + + "spatial_groups_h=" + std::to_string(param().spatial_groups_h) + + ", " + + "spatial_groups_w=" + std::to_string(param().spatial_groups_w); auto errmsg_c = errmsg.c_str(); MEGDNN_MARK_USED_VAR(errmsg_c); @@ -118,20 +115,17 @@ void LocalShareBackwardData::deduce_layout(const TensorLayout& filter, using Mode = LocalShare::Param::Mode; auto errmsg = megdnn_layout_msg(filter) + ", " + megdnn_layout_msg(diff) + ", " + - megdnn_layout_msg(grad) + ", " + megdnn_mangle("is_xcorr=") + + megdnn_layout_msg(grad) + ", " + "is_xcorr=" + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + - ", " + megdnn_mangle("stride_w=") + - std::to_string(param().stride_w) + ", " + - megdnn_mangle("dilate_h=") + std::to_string(param().dilate_h) + - ", " + megdnn_mangle("dilate_w=") + - 
std::to_string(param().dilate_w) + ", " + - megdnn_mangle("spatial_groups_h=") + - std::to_string(param().spatial_groups_h) + ", " + - megdnn_mangle("spatial_groups_w=") + - std::to_string(param().spatial_groups_w); + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w) + ", " + + "dilate_h=" + std::to_string(param().dilate_h) + ", " + + "dilate_w=" + std::to_string(param().dilate_w) + ", " + + "spatial_groups_h=" + std::to_string(param().spatial_groups_h) + + ", " + + "spatial_groups_w=" + std::to_string(param().spatial_groups_w); auto errmsg_c = errmsg.c_str(); MEGDNN_MARK_USED_VAR(errmsg_c); diff --git a/dnn/src/common/matrix_inverse.cpp b/dnn/src/common/matrix_inverse.cpp index 4d48b5bd..e6cbb30d 100644 --- a/dnn/src/common/matrix_inverse.cpp +++ b/dnn/src/common/matrix_inverse.cpp @@ -34,10 +34,9 @@ void MatrixInverse::canonize_params(const TensorLayout& layout, size_t* batch, layout[layout.ndim - 2] == layout[layout.ndim - 1], "invalid MatrixInverse layout: %s", layout.to_string().c_str()); - megdnn_assert( - MEGDNN_FLOAT16_SELECT(layout.dtype == dtype::Float16(), false) || - layout.dtype == dtype::Float32(), - "MatrixInverse only supports f16 & f32"); + megdnn_assert(DNN_FLOAT16_SELECT(layout.dtype == dtype::Float16(), false) || + layout.dtype == dtype::Float32(), + "MatrixInverse only supports f16 & f32"); if (batch) { *batch = 1; for (size_t i = 0; i < layout.ndim - 2; ++i) { diff --git a/dnn/src/common/matrix_mul.cpp b/dnn/src/common/matrix_mul.cpp index 1cd2a9ea..04490bb9 100644 --- a/dnn/src/common/matrix_mul.cpp +++ b/dnn/src/common/matrix_mul.cpp @@ -100,15 +100,15 @@ void MatrixMulForward::check_exec(const TensorLayout& A, const TensorLayout& B, size_t workspace_in_bytes) { auto errmsg = [&]() { std::string msg; - msg.append(megdnn_mangle("A=")); + msg.append("A="); msg.append(A.to_string()); - msg.append(megdnn_mangle(", B=")); + msg.append(", B="); msg.append(B.to_string()); - msg.append(megdnn_mangle(", C=")); + msg.append(", C="); msg.append(C.to_string()); - msg.append(megdnn_mangle(", transposeA=")); + msg.append(", transposeA="); msg.append(std::to_string(param().transposeA)); - msg.append(megdnn_mangle(", transposeB=")); + msg.append(", transposeB="); msg.append(std::to_string(param().transposeB)); return msg; }; @@ -175,7 +175,7 @@ void MatrixMulForward::check_exec(const TensorLayout& A, const TensorLayout& B, megdnn_assert(C.dtype.enumv() == DTypeEnum::QuantizedS16); } megdnn_assert(param().compute_mode != - Param::ComputeMode::FLOAT32 MEGDNN_INC_FLOAT16( + Param::ComputeMode::FLOAT32 DNN_INC_FLOAT16( || A.dtype == dtype::Float16() || A.dtype == dtype::BFloat16()), "ComputeMode::FLOAT32 is only available for Float16/BFloat16 " @@ -195,7 +195,7 @@ size_t MatrixMulForward::pack_size(const Param::Format format) { case Param::Format::MK8: return 8; default: - megdnn_throw(megdnn_mangle("Unknown matmul format.")); + megdnn_throw("Unknown matmul format."); } } diff --git a/dnn/src/common/megcore/cpu/api.cpp b/dnn/src/common/megcore/cpu/api.cpp index cc814e33..f8dee8d9 100644 --- a/dnn/src/common/megcore/cpu/api.cpp +++ b/dnn/src/common/megcore/cpu/api.cpp @@ -40,7 +40,8 @@ CPUDispatcher* megcoreGetCPUDispatcher(megcoreComputingHandle_t handle) { megcoreDeviceHandle_t dev_handle = H->content->dev_handle(); megcorePlatform_t platform; megcoreGetPlatform(dev_handle, &platform); - megdnn_assert(platform 
&megcorePlatformCPU); + megdnn_throw_if(!(platform & megcorePlatformCPU), megdnn_error, + "can not be default ComputingContext"); auto context = static_cast( H->content.get()); return context->get_dispatcher(); diff --git a/dnn/src/common/megcore/cpu/default_computing_context.cpp b/dnn/src/common/megcore/cpu/default_computing_context.cpp index 65d35746..696d5ded 100644 --- a/dnn/src/common/megcore/cpu/default_computing_context.cpp +++ b/dnn/src/common/megcore/cpu/default_computing_context.cpp @@ -41,7 +41,8 @@ DefaultComputingContext::DefaultComputingContext( { megcorePlatform_t platform; megcoreGetPlatform(dev_handle, &platform); - megdnn_assert(platform & megcorePlatformCPU); + megdnn_throw_if(!(platform & megcorePlatformCPU), megdnn_error, + "can not be default ComputingContext"); } DefaultComputingContext::~DefaultComputingContext() noexcept = default; diff --git a/dnn/src/common/megcore/public_api/misc.cpp b/dnn/src/common/megcore/public_api/misc.cpp index 9c199cc8..2d79dc73 100644 --- a/dnn/src/common/megcore/public_api/misc.cpp +++ b/dnn/src/common/megcore/public_api/misc.cpp @@ -13,7 +13,7 @@ const char *megcoreGetErrorName(megcoreStatus_t status) { -#define CASE(x) case x: return megdnn_mangle(#x) +#define CASE(x) case x: return (#x) switch (status) { CASE(megcoreSuccess); CASE(megcoreErrorMemoryAllocation); @@ -22,7 +22,7 @@ const char *megcoreGetErrorName(megcoreStatus_t status) CASE(megcoreErrorInternalError); CASE(megcoreErrorInvalidComputingHandle); default: - return megdnn_mangle(""); + return ""; } #undef CASE } diff --git a/dnn/src/common/pooling.cpp b/dnn/src/common/pooling.cpp index c2cd9531..597fc30c 100644 --- a/dnn/src/common/pooling.cpp +++ b/dnn/src/common/pooling.cpp @@ -19,18 +19,15 @@ void PoolingBase::deduce_layout_fwd(const TensorLayout& src, TensorLayout& dst) { auto errmsg = megdnn_layout_msg(src) + ", " + megdnn_layout_msg(dst) + ", " + - megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " + - megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " + - megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + - ", " + megdnn_mangle("stride_w=") + - std::to_string(param().stride_w) + ", " + - megdnn_mangle("window_h=") + std::to_string(param().window_h) + - ", " + megdnn_mangle("window_w=") + - std::to_string(param().window_w) + ", " + megdnn_mangle("is_max=") + - std::to_string(param().mode == Mode::MAX) + ", " + - megdnn_mangle("is_nhwc=") + - std::to_string(param().format == Param::Format::NHWC) + ", " + - megdnn_mangle("is_nhwcd4=") + + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w) + ", " + + "window_h=" + std::to_string(param().window_h) + ", " + + "window_w=" + std::to_string(param().window_w) + ", " + + "is_max=" + std::to_string(param().mode == Mode::MAX) + ", " + + "is_nhwc=" + std::to_string(param().format == Param::Format::NHWC) + + ", " + "is_nhwcd4=" + std::to_string(param().format == Param::Format::NHWCD4); auto errmsg_c = errmsg.c_str(); diff --git a/dnn/src/common/relayout_format.cpp b/dnn/src/common/relayout_format.cpp index 645e6bac..0a63fe0e 100644 --- a/dnn/src/common/relayout_format.cpp +++ b/dnn/src/common/relayout_format.cpp @@ -361,11 +361,18 @@ void RelayoutFormat::deduce_format(TensorFormat src, TensorFormat& dst) { if (!dst.is_default() && ( handle()->type() != Handle::HandleType::NAIVE)) { +#if MEGDNN_ENABLE_MANGLING + megdnn_throw( + "Only naive 
and opencl handle support " + "Image2DPack4TensorFormat, try building with debug to get more " + "info"); +#else megdnn_throw( "Only naive and opencl handle support " "Image2DPack4TensorFormat, try to export MGB_USE_MEGDNN_DBG=2 " "and also export CUDA_VISIBLE_DEVICES=\'\' at CUDA env" "to enable naive handle"); +#endif } #undef CHECK_SRC } diff --git a/dnn/src/common/remap.cpp b/dnn/src/common/remap.cpp index fdb0f704..218f420c 100644 --- a/dnn/src/common/remap.cpp +++ b/dnn/src/common/remap.cpp @@ -69,8 +69,8 @@ void RemapBase::check_layout_fwd(const TensorLayout& src, "%s", errmsg().c_str()); } else { megdnn_throw( - "megdnn currently do not support other param.format except " - "NHWC and NCHW"); + "currently do not support param.format other than NHWC and " + "NCHW"); } } @@ -91,7 +91,7 @@ void RemapBackwardData::check_exec(const TensorLayout& map_xy, const TensorLayout& grad, size_t workspace_in_bytes) { check_layout_fwd(grad, map_xy, diff); - megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16( + megdnn_assert(grad.dtype == dtype::Float32() DNN_INC_FLOAT16( || grad.dtype == dtype::BFloat16()), "Backward Remap only supports Float32/BFloat16."); auto required_workspace_in_bytes = @@ -106,7 +106,7 @@ void RemapBackwardMat::check_exec(const TensorLayout& src, size_t workspace_in_bytes) { check_layout_fwd(src, map_xy, diff); megdnn_assert_eq_layout(map_xy, grad); - megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16( + megdnn_assert(grad.dtype == dtype::Float32() DNN_INC_FLOAT16( || grad.dtype == dtype::BFloat16()), "Backward Remap only supports Float32/BFloat16."); auto required_workspace_in_bytes = diff --git a/dnn/src/common/separableConv.cpp b/dnn/src/common/separableConv.cpp index 313f4c59..f22832c6 100644 --- a/dnn/src/common/separableConv.cpp +++ b/dnn/src/common/separableConv.cpp @@ -20,17 +20,15 @@ void SeparableConvBase::deduce_layout_fwd(const TensorLayout &src, TensorLayout &dst) { auto errmsg = [&]() { - return megdnn_layout_msg(src) + ", " - + megdnn_layout_msg(filter_x) + ", " - + megdnn_layout_msg(dst) + ", " - + megdnn_mangle("is_xcorr=") - + megdnn_mangle("borderMode=") - + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + ", " - + std::to_string((int)(param().borderMode)) + ", " - + megdnn_mangle("pad_h=") + std::to_string(param().pad_h) + ", " - + megdnn_mangle("pad_w=") + std::to_string(param().pad_w) + ", " - + megdnn_mangle("stride_h=") + std::to_string(param().stride_h) + ", " - + megdnn_mangle("stride_w=") + std::to_string(param().stride_w); + return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter_x) + + ", " + megdnn_layout_msg(dst) + ", " + + "is_xcorr=" + "borderMode=" + + std::to_string((param().mode == Mode::CROSS_CORRELATION)) + + ", " + std::to_string((int)(param().borderMode)) + ", " + + "pad_h=" + std::to_string(param().pad_h) + ", " + + "pad_w=" + std::to_string(param().pad_w) + ", " + + "stride_h=" + std::to_string(param().stride_h) + ", " + + "stride_w=" + std::to_string(param().stride_w); }; MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert_contiguous(src); diff --git a/dnn/src/common/separableFilter.cpp b/dnn/src/common/separableFilter.cpp index 1ca152c8..d2c1adc9 100644 --- a/dnn/src/common/separableFilter.cpp +++ b/dnn/src/common/separableFilter.cpp @@ -21,14 +21,11 @@ void SeparableFilterBase::deduce_layout_fwd(const TensorLayout& src, auto errmsg = [&]() { return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(filter_x) + ", " + megdnn_layout_msg(dst) + ", " + - megdnn_mangle("borderMode=") + -
std::to_string((int)(param().borderMode)) + ", " + - megdnn_mangle("ksize_h=") + std::to_string(param().ksize_h) + - ", " + megdnn_mangle("ksize_w=") + - std::to_string(param().ksize_w) + ", " + - megdnn_mangle("anchor_h=") + std::to_string(param().anchor_h) + - ", " + megdnn_mangle("anchor_w=") + - std::to_string(param().anchor_w); + "borderMode=" + std::to_string((int)(param().borderMode)) + + ", " + "ksize_h=" + std::to_string(param().ksize_h) + ", " + + "ksize_w=" + std::to_string(param().ksize_w) + ", " + + "anchor_h=" + std::to_string(param().anchor_h) + ", " + + "anchor_w=" + std::to_string(param().anchor_w); }; MEGDNN_MARK_USED_VAR(errmsg); megdnn_assert_contiguous(src); diff --git a/dnn/src/common/utils.cpp b/dnn/src/common/utils.cpp index 00c48349..7362550f 100644 --- a/dnn/src/common/utils.cpp +++ b/dnn/src/common/utils.cpp @@ -81,21 +81,21 @@ bool megdnn::get_next_addr(size_t* idx, const size_t* shp, size_t n, size_t stride) { auto errmsg = [&]() { std::string res; - res.append(megdnn_mangle("idx={")); + res.append("idx={"); for (size_t i = 0; i < n; ++i) { res.append(std::to_string(idx[i])); if (i + 1 < n) - res.append(megdnn_mangle(",")); + res.append(","); } - res.append(megdnn_mangle("}, shp={")); + res.append("}, shp={"); for (size_t i = 0; i < n; ++i) { res.append(std::to_string(shp[i])); if (i + 1 < n) - res.append(megdnn_mangle(",")); + res.append(","); } - res.append(megdnn_mangle("}, n=")); + res.append("}, n="); res.append(std::to_string(n)); - res.append(megdnn_mangle(", stride=")); + res.append(", stride="); res.append(std::to_string(stride)); return res; }; diff --git a/dnn/src/common/utils.cuh b/dnn/src/common/utils.cuh index 9408d2bf..12deba34 100644 --- a/dnn/src/common/utils.cuh +++ b/dnn/src/common/utils.cuh @@ -13,43 +13,55 @@ #include "megdnn/arch.h" //! a comma to be used in macro for template params -#define MEGDNN_COMMA , +#define MEGDNN_COMMA , #define MEGDNN_MARK_USED_VAR(v) static_cast(v) -#if MEGDNN_ENABLE_MANGLING -#define megdnn_mangle(x) ("") +#if MEGDNN_ENABLE_LOGGING +#define megdnn_message_strip(x) (x) #else -#define megdnn_mangle(x) (x) -#endif // MEGDNN_ENABLE_MANGLING +#define megdnn_message_strip(x) ("") +#endif // MEGDNN_ENABLE_LOGGING -#define megdnn_throw(msg) ::megdnn::ErrorHandler::on_megdnn_error( \ megdnn_mangle(msg)) -#define megdnn_throw_if(cond, err_type, msg) do { \ if (megdnn_unlikely(cond)) { \ ::megdnn::ErrorHandler::on_##err_type(megdnn_mangle(msg)); \ } \ } while(0) +#define megdnn_throw(msg) \ + ::megdnn::ErrorHandler::on_megdnn_error(megdnn_message_strip(msg)) +#define megdnn_throw_if(cond, err_type, msg) \ + do { \ + if (megdnn_unlikely(cond)) { \ + ::megdnn::ErrorHandler::on_##err_type(megdnn_message_strip(msg)); \ + } \ + } while (0) //! megdnn_assert +#if MEGDNN_ENABLE_LOGGING #if MEGDNN_ENABLE_MANGLING -#define megdnn_assert(expr, ...) \ - do { \ - if (megdnn_unlikely(!(expr))) { \ - ::megdnn::__assert_fail__(NULL, 0, NULL, NULL, NULL); \ - } \ +#define megdnn_assert(expr, ...) \ + do { \ + if (megdnn_unlikely(!(expr))) { \ + ::megdnn::__assert_fail__( \ + "location info stripped, please build with debug", __LINE__, \ + NULL, #expr, ##__VA_ARGS__); \ + } \ } while (0) #else -#define megdnn_assert(expr, ...) \ - do { \ - if (megdnn_unlikely(!(expr))) { \ - ::megdnn::__assert_fail__(__FILE__, __LINE__, \ - __PRETTY_FUNCTION__, # expr, ## __VA_ARGS__); \ - } \ +#define megdnn_assert(expr, ...)
\ + do { \ + if (megdnn_unlikely(!(expr))) { \ + ::megdnn::__assert_fail__(__FILE__, __LINE__, __PRETTY_FUNCTION__, \ + #expr, ##__VA_ARGS__); \ + } \ + } while (0) +#endif // MEGDNN_ENABLE_MANGLING +#else +#define megdnn_assert(expr, ...) \ + do { \ + if (megdnn_unlikely(!(expr))) { \ + ::megdnn::__assert_fail__(NULL, 0, NULL, NULL, NULL); \ + } \ } while (0) -#endif // MEGDNN_ENABLE_MANGLING +#endif // MEGDNN_ENABLE_LOGGING -#define megdnn_assert_internal(expr) \ - do { \ +#define megdnn_assert_internal(expr) \ + do { \ megdnn_assert(expr, "Impossible: internal error."); \ } while (0) diff --git a/dnn/src/common/utils.h b/dnn/src/common/utils.h index a0309aed..1f306398 100644 --- a/dnn/src/common/utils.h +++ b/dnn/src/common/utils.h @@ -116,7 +116,7 @@ } while (0) #define megdnn_layout_msg(layout) \ - std::string(megdnn_mangle(#layout "=" + (layout).to_string())) + std::string(#layout "=" + (layout).to_string()) #define MEGDNN_LOCK_GUARD(var) \ std::lock_guard<std::remove_cv_t<decltype(var)>> _lock_guard_##var { var } @@ -124,6 +124,16 @@ namespace megdnn { /* ================ logging ================ */ +#if MEGDNN_ENABLE_MANGLING +#define megdnn_log_debug(fmt...) \ + _megdnn_do_log(::megdnn::LogLevel::DEBUG, "", "", __LINE__, fmt) +#define megdnn_log(fmt...) \ + _megdnn_do_log(::megdnn::LogLevel::INFO, "", "", __LINE__, fmt) +#define megdnn_log_warn(fmt...) \ + _megdnn_do_log(::megdnn::LogLevel::WARN, "", "", __LINE__, fmt) +#define megdnn_log_error(fmt...) \ + _megdnn_do_log(::megdnn::LogLevel::ERROR, "", "", __LINE__, fmt) +#else #define megdnn_log_debug(fmt...) \ _megdnn_do_log(::megdnn::LogLevel::DEBUG, __FILE__, __func__, __LINE__, fmt) #define megdnn_log(fmt...) \ @@ -132,6 +142,7 @@ namespace megdnn { _megdnn_do_log(::megdnn::LogLevel::WARN, __FILE__, __func__, __LINE__, fmt) #define megdnn_log_error(fmt...)
\ _megdnn_do_log(::megdnn::LogLevel::ERROR, __FILE__, __func__, __LINE__, fmt) +#endif #if MEGDNN_ENABLE_LOGGING void __log__(LogLevel level, const char* file, const char* func, int line, diff --git a/dnn/src/common/warp_affine.cpp b/dnn/src/common/warp_affine.cpp index 6aa9b5bd..48bd648d 100644 --- a/dnn/src/common/warp_affine.cpp +++ b/dnn/src/common/warp_affine.cpp @@ -34,7 +34,7 @@ void WarpAffineBase::check_layout_fwd(const TensorLayout& src, megdnn_assert(src.ndim == 4_z, "%s", errmsg().c_str()); megdnn_assert(dst.ndim == 4_z, "%s", errmsg().c_str()); megdnn_assert(src.dtype.enumv() == DTypeEnum::Float32 || - MEGDNN_FLOAT16_SELECT( + DNN_FLOAT16_SELECT( src.dtype.enumv() == DTypeEnum::Float16, false) || src.dtype.enumv() == DTypeEnum::Int8 || @@ -42,7 +42,7 @@ void WarpAffineBase::check_layout_fwd(const TensorLayout& src, (src.dtype.enumv() == DTypeEnum::QuantizedS8 || src.dtype.enumv() == DTypeEnum::Quantized8Asymm), "WarpAffine NCHW input dtype should be " - "Float32/Int8/Uint8/QInt8/QUint8" MEGDNN_FLOAT16_SELECT( + "Float32/Int8/Uint8/QInt8/QUint8" DNN_FLOAT16_SELECT( "/Float16", "") "."); megdnn_assert( (src.dtype.category() == DTypeCategory::FLOAT && @@ -95,46 +95,46 @@ void WarpAffine::check_exec(const TensorLayout& src, const TensorLayout& mat, std::string WarpAffineBase::param_msg() const { std::string res; - res.append(megdnn_mangle("imode=")); + res.append("imode="); switch (param().imode) { case InterpolationMode::NEAREST: - res.append(megdnn_mangle("NEAREST")); + res.append("NEAREST"); break; case InterpolationMode::LINEAR: - res.append(megdnn_mangle("LINEAR")); + res.append("LINEAR"); break; case InterpolationMode::AREA: - res.append(megdnn_mangle("AREA")); + res.append("AREA"); break; case InterpolationMode::CUBIC: - res.append(megdnn_mangle("CUBIC")); + res.append("CUBIC"); break; case InterpolationMode::LANCZOS4: - res.append(megdnn_mangle("LANCZOS4")); + res.append("LANCZOS4"); break; } - res.append(megdnn_mangle("bmode=")); + res.append("bmode="); switch (param().border_mode) { case BorderMode::WRAP: - res.append(megdnn_mangle("WRAP")); + res.append("WRAP"); break; case BorderMode::CONSTANT: - res.append(megdnn_mangle("CONSTANT")); + res.append("CONSTANT"); break; case BorderMode::REFLECT: - res.append(megdnn_mangle("REFLECT")); + res.append("REFLECT"); break; case BorderMode::REFLECT_101: - res.append(megdnn_mangle("REFLECT_101")); + res.append("REFLECT_101"); break; case BorderMode::REPLICATE: - res.append(megdnn_mangle("REPLICATE")); + res.append("REPLICATE"); break; case BorderMode::TRANSPARENT: - res.append(megdnn_mangle("TRANSPARENT")); + res.append("TRANSPARENT"); break; case BorderMode::ISOLATED: - res.append(megdnn_mangle("ISOLATED")); + res.append("ISOLATED"); break; } if (param().border_mode == BorderMode::CONSTANT) { diff --git a/dnn/src/common/warp_perspective.cpp b/dnn/src/common/warp_perspective.cpp index 9ec8cc63..4c18b2b5 100644 --- a/dnn/src/common/warp_perspective.cpp +++ b/dnn/src/common/warp_perspective.cpp @@ -64,7 +64,7 @@ void WarpPerspectiveBase::check_layout_fwd(const TensorLayout& src, if (param().format == param::WarpPerspective::Format::NCHW) { megdnn_assert( src.dtype.enumv() == DTypeEnum::Float32 || - MEGDNN_FLOAT16_SELECT( + DNN_FLOAT16_SELECT( (src.dtype.enumv() == DTypeEnum::Float16 || src.dtype.enumv() == DTypeEnum::BFloat16), false) || @@ -73,7 +73,7 @@ void WarpPerspectiveBase::check_layout_fwd(const TensorLayout& src, (src.dtype.enumv() == DTypeEnum::QuantizedS8 || src.dtype.enumv() == DTypeEnum::Quantized8Asymm), 
"WarpPerspective NCHW input dtype should be " - "Float32/Int8/Uint8/QInt8/QUint8" MEGDNN_FLOAT16_SELECT( + "Float32/Int8/Uint8/QInt8/QUint8" DNN_FLOAT16_SELECT( "/Float16/BFloat16", "") "."); megdnn_assert( (src.dtype.category() == DTypeCategory::FLOAT && @@ -120,14 +120,13 @@ void WarpPerspectiveBase::check_layout_fwd(const TensorLayout& src, param::WarpPerspective::Format::NHWCD4); megdnn_assert( src.dtype == dtype::Float32() || - MEGDNN_FLOAT16_SELECT( - (src.dtype == dtype::Float16() || - src.dtype == dtype::BFloat16()), - false) || + DNN_FLOAT16_SELECT((src.dtype == dtype::Float16() || + src.dtype == dtype::BFloat16()), + false) || src.dtype.enumv() == DTypeEnum::QuantizedS8 || src.dtype.enumv() == DTypeEnum::Quantized8Asymm, "WarpPerspective NHWCD4 input dtype should be " - "Float32" MEGDNN_FLOAT16_SELECT( + "Float32" DNN_FLOAT16_SELECT( "/Float16/BFloat16", "") ",QunatizedS8, Quantized8Asymm."); megdnn_assert( @@ -189,46 +188,46 @@ void WarpPerspectiveBase::check_layout_fwd(const TensorLayout& src, std::string WarpPerspectiveBase::param_msg() const { std::string res; - res.append(megdnn_mangle("imode=")); + res.append("imode="); switch (param().imode) { case InterpolationMode::NEAREST: - res.append(megdnn_mangle("NEAREST")); + res.append("NEAREST"); break; case InterpolationMode::LINEAR: - res.append(megdnn_mangle("LINEAR")); + res.append("LINEAR"); break; case InterpolationMode::AREA: - res.append(megdnn_mangle("AREA")); + res.append("AREA"); break; case InterpolationMode::CUBIC: - res.append(megdnn_mangle("CUBIC")); + res.append("CUBIC"); break; case InterpolationMode::LANCZOS4: - res.append(megdnn_mangle("LANCZOS4")); + res.append("LANCZOS4"); break; } - res.append(megdnn_mangle("bmode=")); + res.append("bmode="); switch (param().bmode) { case BorderMode::WRAP: - res.append(megdnn_mangle("WRAP")); + res.append("WRAP"); break; case BorderMode::CONSTANT: - res.append(megdnn_mangle("CONSTANT")); + res.append("CONSTANT"); break; case BorderMode::REFLECT: - res.append(megdnn_mangle("REFLECT")); + res.append("REFLECT"); break; case BorderMode::REFLECT_101: - res.append(megdnn_mangle("REFLECT_101")); + res.append("REFLECT_101"); break; case BorderMode::REPLICATE: - res.append(megdnn_mangle("REPLICATE")); + res.append("REPLICATE"); break; case BorderMode::TRANSPARENT: - res.append(megdnn_mangle("TRANSPARENT")); + res.append("TRANSPARENT"); break; case BorderMode::ISOLATED: - res.append(megdnn_mangle("ISOLATED")); + res.append("ISOLATED"); break; } if (param().bmode == BorderMode::CONSTANT) { @@ -301,7 +300,7 @@ void WarpPerspectiveBackwardData::check_exec(const TensorLayout& mat, const TensorLayout& grad, size_t workspace_in_bytes) { check_layout_fwd(grad, mat, mat_idx, diff); - megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16( + megdnn_assert(grad.dtype == dtype::Float32() DNN_INC_FLOAT16( || grad.dtype == dtype::BFloat16()), "Backward WarpPerspective only supports Float32/BFloat16."); auto required_workspace_in_bytes = @@ -317,7 +316,7 @@ void WarpPerspectiveBackwardMat::check_exec(const TensorLayout& src, size_t workspace_in_bytes) { check_layout_fwd(src, mat, mat_idx, diff); megdnn_assert_eq_layout(mat, grad); - megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16( + megdnn_assert(grad.dtype == dtype::Float32() DNN_INC_FLOAT16( || grad.dtype == dtype::BFloat16()), "Backward WarpPerspective only supports Float32/BFloat16."); auto required_workspace_in_bytes = diff --git a/dnn/src/common/winograd/winograd_helper.cpp 
b/dnn/src/common/winograd/winograd_helper.cpp index beef9151..9d96d373 100644 --- a/dnn/src/common/winograd/winograd_helper.cpp +++ b/dnn/src/common/winograd/winograd_helper.cpp @@ -353,7 +353,7 @@ void StrategyHelper< _output_compute_type>; INST(float, float, float, float) -MEGDNN_INC_FLOAT16(INST(dt_float16, dt_float16, dt_float16, dt_float16)) +DNN_INC_FLOAT16(INST(dt_float16, dt_float16, dt_float16, dt_float16)) INST(int8_t, int8_t, int16_t, int) INST(uint8_t, uint8_t, int16_t, int) #undef INST @@ -376,7 +376,7 @@ INST(int8_t, int8_t, float, float, param::ConvBias::Format::NCHW44) INST(int8_t, int8_t, int16_t, int, param::ConvBias::Format::NCHW) INST(int8_t, int8_t, int16_t, int, param::ConvBias::Format::NCHW44) INST(float, float, float, float, param::ConvBias::Format::NCHW88) -MEGDNN_INC_FLOAT16(INST(dt_float16, dt_float16, dt_float16, dt_float16, +DNN_INC_FLOAT16(INST(dt_float16, dt_float16, dt_float16, dt_float16, param::ConvBias::Format::NCHW)) #undef INST } // namespace winograd diff --git a/dnn/src/cuda/add_update/opr_impl.cpp b/dnn/src/cuda/add_update/opr_impl.cpp index 17ea51d1..5341d20b 100644 --- a/dnn/src/cuda/add_update/opr_impl.cpp +++ b/dnn/src/cuda/add_update/opr_impl.cpp @@ -39,7 +39,7 @@ void AddUpdateForwardImpl::exec( #undef cb default: - megdnn_throw(megdnn_mangle("unsupported dtype for AddUpdate")); + megdnn_throw("unsupported dtype for AddUpdate"); } } @@ -59,7 +59,7 @@ void AddUpdateForwardImpl::exec_noncontig( #undef cb default: - megdnn_throw(megdnn_mangle("unsupported dtype for AddUpdate")); + megdnn_throw("unsupported dtype for AddUpdate"); } } diff --git a/dnn/src/cuda/batch_conv_bias/algo.cpp b/dnn/src/cuda/batch_conv_bias/algo.cpp index de2f83a8..91425147 100644 --- a/dnn/src/cuda/batch_conv_bias/algo.cpp +++ b/dnn/src/cuda/batch_conv_bias/algo.cpp @@ -55,7 +55,7 @@ BatchConvBiasForwardImpl::AlgoBase::ExecArgs::ExecArgs( std::string BatchConvBiasForwardImpl::AlgoBase::SizeArgs::to_string() const { auto&& param = opr->param(); MEGDNN_MARK_USED_VAR(param); - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s, filter=%s, bias=%s, z=%s, dst=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, " "dtype=(%s(src),%s(flt),%s(bias),%s(z))->(%s(dst))", @@ -65,7 +65,7 @@ std::string BatchConvBiasForwardImpl::AlgoBase::SizeArgs::to_string() const { param.stride_h, param.stride_w, param.dilate_h, param.dilate_w, static_cast<int>(param.mode), src_layout.dtype.name(), filter_layout.dtype.name(), bias_layout.dtype.name(), - z_layout.dtype.name(), dst_layout.dtype.name())); + z_layout.dtype.name(), dst_layout.dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/batch_conv_bias/opr_impl.cpp b/dnn/src/cuda/batch_conv_bias/opr_impl.cpp index 7a3ccc7e..760adf94 100644 --- a/dnn/src/cuda/batch_conv_bias/opr_impl.cpp +++ b/dnn/src/cuda/batch_conv_bias/opr_impl.cpp @@ -32,11 +32,11 @@ BatchConvBiasForwardImpl::get_algorithm_heuristic( args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.int8_nchw4_implicit_gemm_dotprod; } - megdnn_throw(megdnn_mangle( + megdnn_throw( ssprintf("no %s batch conv bias algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ?
"reproducible" : "usable", - args.to_string().c_str(), workspace_limit_in_bytes))); + args.to_string().c_str(), workspace_limit_in_bytes)); } std::vector diff --git a/dnn/src/cuda/batch_normalization/opr_impl.cpp b/dnn/src/cuda/batch_normalization/opr_impl.cpp index fff77f26..4a6551a0 100644 --- a/dnn/src/cuda/batch_normalization/opr_impl.cpp +++ b/dnn/src/cuda/batch_normalization/opr_impl.cpp @@ -35,8 +35,7 @@ void BNTensorDescHolder::setup(const TensorLayout& x, bn_mode = CUDNN_BATCHNORM_SPATIAL; break; default: - megdnn_throw(megdnn_mangle( - "Unknown param dim type of batch normalization.")); + megdnn_throw("Unknown param dim type of batch normalization."); } xy_desc.set(TensorLayout(xy_shape, x.dtype)); param_desc.set(xy_desc.desc, bn_mode); @@ -83,8 +82,7 @@ void BNForwardImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in bn_scale, m_param.epsilon)); break; default: - megdnn_throw(megdnn_mangle( - "Unknown forward mode type of batch normalization.")); + megdnn_throw("Unknown forward mode type of batch normalization."); } } diff --git a/dnn/src/cuda/batched_matrix_mul/algo.cpp b/dnn/src/cuda/batched_matrix_mul/algo.cpp index cd59ed84..3f3994de 100644 --- a/dnn/src/cuda/batched_matrix_mul/algo.cpp +++ b/dnn/src/cuda/batched_matrix_mul/algo.cpp @@ -27,11 +27,11 @@ std::string BatchedMatrixMulForwardImpl::AlgoBase::SizeArgs::to_string() const { MEGDNN_MARK_USED_VAR(m); MEGDNN_MARK_USED_VAR(n); MEGDNN_MARK_USED_VAR(k); - return megdnn_mangle(ssprintf( + return ssprintf( "A={%zux%zu},B={%zux%zu},C={%zux%zu},Transpose A=%d,Transpose " "B=%d,ldA=%zu,ldB=%zu,ldC=%zu", m, k, k, n, m, n, param.transposeA, param.transposeB, - layout_a.stride[0], layout_b.stride[0], layout_c.stride[0])); + layout_a.stride[0], layout_b.stride[0], layout_c.stride[0]); } BatchedMatrixMulForwardImpl::AlgoBase::SizeArgs::SizeArgs( diff --git a/dnn/src/cuda/batched_matrix_mul/cublas_lt.cpp b/dnn/src/cuda/batched_matrix_mul/cublas_lt.cpp index 98e55249..627ff031 100644 --- a/dnn/src/cuda/batched_matrix_mul/cublas_lt.cpp +++ b/dnn/src/cuda/batched_matrix_mul/cublas_lt.cpp @@ -145,8 +145,7 @@ void BatchedMatrixMulForwardImpl::AlgoCublasLt::exec( } else if (desc.dt_compute == CUBLAS_COMPUTE_32F) { batched_sgemm(); } else { - megdnn_throw( - megdnn_mangle("compute_type must be int32/float16/float32")); + megdnn_throw("compute_type must be int32/float16/float32"); } #else if (desc.dt_compute == CUDA_R_32I) { @@ -156,8 +155,7 @@ void BatchedMatrixMulForwardImpl::AlgoCublasLt::exec( } else if (desc.dt_compute == CUDA_R_32F) { batched_sgemm(); } else { - megdnn_throw( - megdnn_mangle("compute_type must be int32/float16/float32")); + megdnn_throw("compute_type must be int32/float16/float32"); } #endif } diff --git a/dnn/src/cuda/conv_bias/algo.cpp b/dnn/src/cuda/conv_bias/algo.cpp index 5367305e..8f251344 100644 --- a/dnn/src/cuda/conv_bias/algo.cpp +++ b/dnn/src/cuda/conv_bias/algo.cpp @@ -163,7 +163,7 @@ std::string ConvBiasForwardImpl::AlgoBase::SizeArgs::to_string() const { default: megdnn_throw("invalid conv bias nonlinear mode"); } - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s, filter=%u{%u,%u,%u,%u}, bias=%s, z=%s, dst=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s, " "nonlinear_mode=%s", @@ -173,7 +173,7 @@ std::string ConvBiasForwardImpl::AlgoBase::SizeArgs::to_string() const { fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip, src_layout->dtype.name(), dst_layout->dtype.name(), - nonlinear_mode_str.c_str())); + 
nonlinear_mode_str.c_str()); } void ConvBiasForwardImpl::AlgoPack::fill_cudnn_algos() { @@ -253,9 +253,8 @@ ConvBiasForwardImpl::AlgoPack::cudnn_conv_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw( - megdnn_mangle(ssprintf("can not find cudnn conv fwd algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn conv fwd algorithm %d", + static_cast<int>(algo))); } ConvBiasForwardImpl::AlgoBase* @@ -265,9 +264,8 @@ ConvBiasForwardImpl::AlgoPack::cudnn_conv_bias_act_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw(megdnn_mangle( - ssprintf("can not find cudnn conv bias act algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn conv bias act algorithm %d", + static_cast<int>(algo))); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp b/dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp index 5755a912..99fca489 100644 --- a/dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp +++ b/dnn/src/cuda/conv_bias/cudnn_conv_bias_activation.cpp @@ -104,7 +104,7 @@ bool ConvBiasForwardImpl::AlgoCUDNNConvBiasActivation::is_available( break; return false; default: - megdnn_throw(megdnn_mangle("unsupported NonlineMode")); + megdnn_throw("unsupported NonlineMode"); } size_t workspace_size; auto status = cudnnGetConvolutionForwardWorkspaceSize( @@ -139,7 +139,7 @@ size_t ConvBiasForwardImpl::AlgoCUDNNConvBiasActivation::get_workspace_in_bytes( void ConvBiasForwardImpl::AlgoCUDNNConvBiasActivation::exec( const ExecArgs& args) const { #if CUDNN_MAJOR < 7 - megdnn_throw(megdnn_mangle("ConvBias requires cudnn 7.0 or higher")); + megdnn_throw("ConvBias requires cudnn 7.0 or higher"); #else megdnn_assert(cudnnGetVersion() >= 7401); CUDNNForwardDescs D; @@ -269,7 +269,7 @@ void ConvBiasForwardImpl::AlgoCUDNNConvBiasActivation::exec( break; } default: - megdnn_throw(megdnn_mangle("unsupported NonlineMode")); + megdnn_throw("unsupported NonlineMode"); } #endif } diff --git a/dnn/src/cuda/conv_bias/helper.cpp b/dnn/src/cuda/conv_bias/helper.cpp index 1f45f170..8bb1d8b3 100644 --- a/dnn/src/cuda/conv_bias/helper.cpp +++ b/dnn/src/cuda/conv_bias/helper.cpp @@ -31,8 +31,8 @@ ConvBiasDesc::~ConvBiasDesc() { void ConvBiasDesc::set_conv_bias(DType data_type, const param::ConvBias& param, size_t nr_group) { #if CUDNN_VERSION < 7100 - megdnn_throw(megdnn_mangle( - "ConvBias(CUDNN_ACTIVATION_IDENTITY) requires cudnn 7.1 or higher")); + megdnn_throw( + "ConvBias(CUDNN_ACTIVATION_IDENTITY) requires cudnn 7.1 or higher"); #else cudnnConvolutionMode_t mode; using Param = param::ConvBias; @@ -44,7 +44,7 @@ void ConvBiasDesc::set_conv_bias(DType data_type, const param::ConvBias& param, mode = CUDNN_CONVOLUTION; break; default: - megdnn_throw(megdnn_mangle("conv mode must be conv or xcorr.")); + megdnn_throw("conv mode must be conv or xcorr."); } cudnn_check(cudnnSetConvolutionGroupCount(conv_desc, nr_group)); cudnnDataType_t compute_type; @@ -57,7 +57,7 @@ void ConvBiasDesc::set_conv_bias(DType data_type, const param::ConvBias& param, compute_type = CUDNN_DATA_INT32; break; default: - megdnn_throw(megdnn_mangle("unsupported data type for conv bias")); + megdnn_throw("unsupported data type for conv bias"); } if (data_type.enumv() == DTypeEnum::Float16) { auto comp_mode = param.compute_mode; @@ -81,7 +81,7 @@ void ConvBiasDesc::set_conv_bias(DType data_type, const param::ConvBias& param, 0)); break; default: - megdnn_throw(megdnn_mangle("unsupported non linear mode")); + megdnn_throw("unsupported non linear mode"); }
#endif } @@ -98,7 +98,7 @@ void ConvBiasDesc::set_conv(DType data_type, const param::ConvBias& param, mode = CUDNN_CONVOLUTION; break; default: - megdnn_throw(megdnn_mangle("conv mode must be conv or xcorr.")); + megdnn_throw("conv mode must be conv or xcorr."); } cudnnDataType_t compute_type; MEGDNN_MARK_USED_VAR(compute_type); @@ -114,7 +114,7 @@ void ConvBiasDesc::set_conv(DType data_type, const param::ConvBias& param, compute_type = CUDNN_DATA_INT32; #endif } else { - megdnn_throw(megdnn_mangle("unsupported data type for conv bias")); + megdnn_throw("unsupported data type for conv bias"); } #if CUDNN_MAJOR >= 7 cudnn_check(cudnnSetConvolutionGroupCount(conv_desc, nr_group)); diff --git a/dnn/src/cuda/convolution/backward_data/algo.cpp b/dnn/src/cuda/convolution/backward_data/algo.cpp index 871effc1..f178240b 100644 --- a/dnn/src/cuda/convolution/backward_data/algo.cpp +++ b/dnn/src/cuda/convolution/backward_data/algo.cpp @@ -73,9 +73,8 @@ ConvolutionBackwardDataImpl::AlgoPack::cudnn_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw( - megdnn_mangle(ssprintf("can not find cudnn bwd_data algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn bwd_data algorithm %d", + static_cast<int>(algo))); } ConvolutionBackwardDataImpl::AlgoPack ConvolutionBackwardDataImpl::sm_algo_pack; @@ -110,14 +109,14 @@ ConvolutionBackwardDataImpl::AlgoBase::ExecArgs::ExecArgs( std::string ConvolutionBackwardDataImpl::AlgoBase::SizeArgs::to_string() const { auto&& fm = filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( + return ssprintf( "filter=%u{%u,%u,%u,%u}, diff=%s, grad=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], diff_layout->to_string().c_str(), grad_layout->to_string().c_str(), fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip, - diff_layout->dtype.name(), grad_layout->dtype.name())); + diff_layout->dtype.name(), grad_layout->dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/convolution/backward_filter/algo.cpp b/dnn/src/cuda/convolution/backward_filter/algo.cpp index f7ea856a..bd367f80 100644 --- a/dnn/src/cuda/convolution/backward_filter/algo.cpp +++ b/dnn/src/cuda/convolution/backward_filter/algo.cpp @@ -60,9 +60,8 @@ ConvolutionBackwardFilterImpl::AlgoPack::cudnn_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw(megdnn_mangle(ssprintf( - "can not find cudnn bwd_filter algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn bwd_filter algorithm %d", + static_cast<int>(algo))); } ConvolutionBackwardFilterImpl::AlgoPack @@ -103,16 +102,14 @@ std::string ConvolutionBackwardFilterImpl::AlgoBase::SizeArgs::to_string() const { auto &&fm = grad_filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( - "src=%s diff=%s grad_filter=%u{%u,%u,%u,%u}, " - "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", - src_layout->to_string().c_str(), - diff_layout->to_string().c_str(), - fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], - fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], - fm.dilation[0], fm.dilation[1], - !fm.should_flip, - src_layout->dtype.name(), diff_layout->dtype.name())); + return ssprintf( + "src=%s diff=%s grad_filter=%u{%u,%u,%u,%u}, " + "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", + src_layout->to_string().c_str(), diff_layout->to_string().c_str(), + fm.group, fm.ocpg, fm.icpg,
fm.spatial[0], fm.spatial[1], + fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], + fm.dilation[0], fm.dilation[1], !fm.should_flip, + src_layout->dtype.name(), diff_layout->dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/convolution/forward/algos.cpp b/dnn/src/cuda/convolution/forward/algos.cpp index 203dd63b..039f071e 100644 --- a/dnn/src/cuda/convolution/forward/algos.cpp +++ b/dnn/src/cuda/convolution/forward/algos.cpp @@ -110,10 +110,10 @@ ConvolutionForwardImpl::AlgoBase::ExecArgs::ExecArgs( workspace{workspace} {} std::string ConvolutionForwardImpl::AlgoBase::SizeArgs::to_string() const { - return megdnn_mangle(ssprintf("src=%s, filter=%s, dst=%s", - layout_src->to_string().c_str(), - layout_filter->to_string().c_str(), - layout_dst->to_string().c_str())); + return ssprintf("src=%s, filter=%s, dst=%s", + layout_src->to_string().c_str(), + layout_filter->to_string().c_str(), + layout_dst->to_string().c_str()); } /* ===================== default algo ===================== */ diff --git a/dnn/src/cuda/convolution3d/backward_data/algo.cpp b/dnn/src/cuda/convolution3d/backward_data/algo.cpp index 1e992916..b019d0b0 100644 --- a/dnn/src/cuda/convolution3d/backward_data/algo.cpp +++ b/dnn/src/cuda/convolution3d/backward_data/algo.cpp @@ -54,9 +54,8 @@ Convolution3DBackwardDataImpl::AlgoPack::cudnn_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw(megdnn_mangle(ssprintf( - "can not find cudnn bwd_data algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn bwd_data algorithm %d", + static_cast<int>(algo))); } Convolution3DBackwardDataImpl::AlgoPack Convolution3DBackwardDataImpl::sm_algo_pack; @@ -96,17 +95,16 @@ Convolution3DBackwardDataImpl::AlgoBase::ExecArgs::ExecArgs( std::string Convolution3DBackwardDataImpl::AlgoBase::SizeArgs::to_string() const { auto &&fm = filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( - "filter=%u{%u,%u,%u,%u,%u}, diff=%s, grad=%s, " - "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, dtype=%s,%s", - fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], fm.spatial[2], - diff_layout->to_string().c_str(), - grad_layout->to_string().c_str(), - fm.padding[0], fm.padding[1], fm.padding[2], - fm.stride[0], fm.stride[1], fm.stride[2], - fm.dilation[0], fm.dilation[1] ,fm.dilation[2], - !fm.should_flip, - diff_layout->dtype.name(), grad_layout->dtype.name())); + return ssprintf( + "filter=%u{%u,%u,%u,%u,%u}, diff=%s, grad=%s, " + "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, " + "dtype=%s,%s", + fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], + fm.spatial[2], diff_layout->to_string().c_str(), + grad_layout->to_string().c_str(), fm.padding[0], fm.padding[1], + fm.padding[2], fm.stride[0], fm.stride[1], fm.stride[2], + fm.dilation[0], fm.dilation[1], fm.dilation[2], !fm.should_flip, + diff_layout->dtype.name(), grad_layout->dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/convolution3d/backward_filter/algo.cpp b/dnn/src/cuda/convolution3d/backward_filter/algo.cpp index 6ae55261..4ae4aa94 100644 --- a/dnn/src/cuda/convolution3d/backward_filter/algo.cpp +++ b/dnn/src/cuda/convolution3d/backward_filter/algo.cpp @@ -56,9 +56,8 @@ Convolution3DBackwardFilterImpl::AlgoPack::cudnn_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw(megdnn_mangle(ssprintf( - "can not find cudnn bwd_filter algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn bwd_filter algorithm %d", + static_cast<int>(algo)));
} Convolution3DBackwardFilterImpl::AlgoPack @@ -100,18 +99,16 @@ std::string Convolution3DBackwardFilterImpl::AlgoBase::SizeArgs::to_string() const { auto &&fm = grad_filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( - "src=%s diff=%s grad_filter=%u{%u,%u,%u,%u,%u}, " - "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, dtype=%s,%s", - src_layout->to_string().c_str(), - diff_layout->to_string().c_str(), - fm.group, fm.ocpg, fm.icpg, - fm.spatial[0], fm.spatial[1], fm.spatial[2], - fm.padding[0], fm.padding[1], fm.padding[2], - fm.stride[0], fm.stride[1], fm.stride[2], - fm.dilation[0], fm.dilation[1], fm.dilation[2], - !fm.should_flip, - src_layout->dtype.name(), diff_layout->dtype.name())); + return ssprintf( + "src=%s diff=%s grad_filter=%u{%u,%u,%u,%u,%u}, " + "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, " + "dtype=%s,%s", + src_layout->to_string().c_str(), diff_layout->to_string().c_str(), + fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], + fm.spatial[2], fm.padding[0], fm.padding[1], fm.padding[2], + fm.stride[0], fm.stride[1], fm.stride[2], fm.dilation[0], + fm.dilation[1], fm.dilation[2], !fm.should_flip, + src_layout->dtype.name(), diff_layout->dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/convolution3d/forward/algo.cpp b/dnn/src/cuda/convolution3d/forward/algo.cpp index b04930b0..872ec8f9 100644 --- a/dnn/src/cuda/convolution3d/forward/algo.cpp +++ b/dnn/src/cuda/convolution3d/forward/algo.cpp @@ -59,8 +59,8 @@ Convolution3DForwardImpl::AlgoPack::cudnn_from_enum( if (i.cudnn_enum() == algo) return &i; } - megdnn_throw(megdnn_mangle(ssprintf("can not find cudnn fwd algorithm %d", - static_cast<int>(algo)))); + megdnn_throw(ssprintf("can not find cudnn fwd algorithm %d", + static_cast<int>(algo))); } Convolution3DForwardImpl::AlgoPack Convolution3DForwardImpl::sm_algo_pack; @@ -101,18 +101,16 @@ Convolution3DForwardImpl::AlgoBase::ExecArgs::ExecArgs( std::string Convolution3DForwardImpl::AlgoBase::SizeArgs::to_string() const { auto &&fm = filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( - "src=%s, filter=%u{%u,%u,%u,%u,%u}, dst=%s, " - "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, dtype=%s,%s", - src_layout->to_string().c_str(), - fm.group, fm.ocpg, fm.icpg, - fm.spatial[0], fm.spatial[1], fm.spatial[2], - dst_layout->to_string().c_str(), - fm.padding[0], fm.padding[1], fm.padding[2], - fm.stride[0], fm.stride[1], fm.stride[2], - fm.dilation[0], fm.dilation[1], fm.dilation[2], - !fm.should_flip, - src_layout->dtype.name(), dst_layout->dtype.name())); + return ssprintf( + "src=%s, filter=%u{%u,%u,%u,%u,%u}, dst=%s, " + "pad=%ux%ux%u, stride=%ux%ux%u, dilate=%ux%ux%u, xcorr=%d, " + "dtype=%s,%s", + src_layout->to_string().c_str(), fm.group, fm.ocpg, fm.icpg, + fm.spatial[0], fm.spatial[1], fm.spatial[2], + dst_layout->to_string().c_str(), fm.padding[0], fm.padding[1], + fm.padding[2], fm.stride[0], fm.stride[1], fm.stride[2], + fm.dilation[0], fm.dilation[1], fm.dilation[2], !fm.should_flip, + src_layout->dtype.name(), dst_layout->dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/cudnn_wrapper.cpp b/dnn/src/cuda/cudnn_wrapper.cpp index 127caa74..bd2ec42d 100644 --- a/dnn/src/cuda/cudnn_wrapper.cpp +++ b/dnn/src/cuda/cudnn_wrapper.cpp @@ -54,9 +54,9 @@ cudnnDataType_t to_cudnn_dtype(DType type, #endif default: #if CUDNN_MAJOR >= 6 - megdnn_throw(megdnn_mangle("dtype must be float16/float32/int8/int32")); + megdnn_throw("dtype must be float16/float32/int8/int32");
#else - megdnn_throw(megdnn_mangle("dtype must be float16/float32")); + megdnn_throw("dtype must be float16/float32"); #endif } @@ -259,7 +259,7 @@ void ConvDesc::set(DType data_type, const param::Convolution& param, mode = CUDNN_CONVOLUTION; break; default: - megdnn_throw(megdnn_mangle("conv mode must be conv or xcorr.")); + megdnn_throw("conv mode must be conv or xcorr."); } cudnnDataType_t compute_type; MEGDNN_MARK_USED_VAR(compute_type); @@ -275,7 +275,7 @@ void ConvDesc::set(DType data_type, const param::Convolution& param, compute_type = CUDNN_DATA_INT32; #endif } else { - megdnn_throw(megdnn_mangle("unsupported data type for conv bias")); + megdnn_throw("unsupported data type for conv bias"); } #if CUDNN_MAJOR >= 7 cudnn_check(cudnnSetConvolutionGroupCount(desc, nr_group)); @@ -445,7 +445,7 @@ void Conv3DDesc::set(const param::Convolution3D& param, const size_t nr_group) { mode = CUDNN_CONVOLUTION; break; default: - megdnn_throw(megdnn_mangle("conv mode must be conv or xcorr.")); + megdnn_throw("conv mode must be conv or xcorr."); } #if CUDNN_MAJOR >= 7 cudnn_check(cudnnSetConvolutionGroupCount(desc, nr_group)); diff --git a/dnn/src/cuda/cumsum/kern_impl.cu b/dnn/src/cuda/cumsum/kern_impl.cu index 47f5d6fa..b40774e4 100644 --- a/dnn/src/cuda/cumsum/kern_impl.cu +++ b/dnn/src/cuda/cumsum/kern_impl.cu @@ -62,7 +62,7 @@ uint32_t cumsum::get_workspace_bytes_for_cub_1d(uint32_t nr_item, CASE(8, uint64_t); #undef CASE default: - report_error(megdnn_mangle("unsupported item size in cumsum")); + report_error("unsupported item size in cumsum"); } } diff --git a/dnn/src/cuda/deformable_conv/opr_impl.cpp b/dnn/src/cuda/deformable_conv/opr_impl.cpp index e38dd4ea..1131724e 100644 --- a/dnn/src/cuda/deformable_conv/opr_impl.cpp +++ b/dnn/src/cuda/deformable_conv/opr_impl.cpp @@ -77,11 +77,11 @@ AlgoFwd* Fwd::get_algorithm_heuristic(const TensorLayout& im, args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.algo_matmul; } - megdnn_throw(megdnn_mangle( + megdnn_throw( ssprintf("no %s deformable conv fwd algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ? "reproducible" : "usable", - args.to_string().c_str(), workspace_limit_in_bytes))); + args.to_string().c_str(), workspace_limit_in_bytes)); } const char* Fwd::get_algorithm_set_name() const { @@ -131,11 +131,11 @@ AlgoBwdFlt* BwdFlt::get_algorithm_heuristic( args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.algo_matmul; } - megdnn_throw(megdnn_mangle(ssprintf( + megdnn_throw(ssprintf( "no %s deformable conv bwd filter algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ? "reproducible" : "usable", args.to_string().c_str(), - workspace_limit_in_bytes))); + workspace_limit_in_bytes)); } size_t BwdFlt::get_workspace_in_bytes( @@ -194,11 +194,11 @@ AlgoBwdData* BwdData::get_algorithm_heuristic( args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.algo_matmul; } - megdnn_throw(megdnn_mangle(ssprintf( + megdnn_throw(ssprintf( "no %s deformable conv bwd data algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ?
"reproducible" : "usable", args.to_string().c_str(), - workspace_limit_in_bytes))); + workspace_limit_in_bytes)); } size_t BwdData::get_workspace_in_bytes( diff --git a/dnn/src/cuda/indexing_multi_axis_vec/opr_impl.cpp b/dnn/src/cuda/indexing_multi_axis_vec/opr_impl.cpp index a3bc6919..3dd62d2c 100644 --- a/dnn/src/cuda/indexing_multi_axis_vec/opr_impl.cpp +++ b/dnn/src/cuda/indexing_multi_axis_vec/opr_impl.cpp @@ -199,7 +199,7 @@ size_t IndexingIncrMultiAxisVecImpl::get_workspace_in_bytes( void IndexingIncrMultiAxisVecImpl::exec( _megdnn_tensor_inout data, _megdnn_tensor_in value, const IndexDesc &index, _megdnn_workspace workspace) { - MEGDNN_INC_FLOAT16( + DNN_INC_FLOAT16( megdnn_assert(data.layout.dtype != dtype::Float16(), "float16 incr on cuda currently not supported")); auto info = check_exec(data.layout, value.layout, index, workspace.size); diff --git a/dnn/src/cuda/indexing_one_hot/opr_impl.cpp b/dnn/src/cuda/indexing_one_hot/opr_impl.cpp index e23be960..f5a6fb47 100644 --- a/dnn/src/cuda/indexing_one_hot/opr_impl.cpp +++ b/dnn/src/cuda/indexing_one_hot/opr_impl.cpp @@ -53,7 +53,7 @@ void IndexingOneHotForwardImpl::exec( switch (src.layout.dtype.enumv()) { MEGDNN_FOREACH_COMPUTING_DTYPE(cb) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } @@ -80,7 +80,7 @@ void IndexingSetOneHotForwardImpl::exec( switch (data.layout.dtype.enumv()) { MEGDNN_FOREACH_COMPUTING_DTYPE(cb) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } diff --git a/dnn/src/cuda/local_share/backward_data/algo.cpp b/dnn/src/cuda/local_share/backward_data/algo.cpp index 34592a27..2a5d6d4c 100644 --- a/dnn/src/cuda/local_share/backward_data/algo.cpp +++ b/dnn/src/cuda/local_share/backward_data/algo.cpp @@ -47,14 +47,14 @@ LocalShareBackwardDataImpl::AlgoBase::ExecArgs::ExecArgs(LocalShareBackwardDataI std::string LocalShareBackwardDataImpl::AlgoBase::SizeArgs::to_string() const { auto&& param = opr->param(); MEGDNN_MARK_USED_VAR(param); - return megdnn_mangle(ssprintf( + return ssprintf( "filter=%s, diff=%s, grad=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s->%s", filter_layout.to_string().c_str(), diff_layout.to_string().c_str(), grad_layout.to_string().c_str(), param.pad_h, param.pad_w, param.stride_h, param.stride_w, param.dilate_h, param.dilate_w, static_cast(param.mode), filter_layout.dtype.name(), - diff_layout.dtype.name(), grad_layout.dtype.name())); + diff_layout.dtype.name(), grad_layout.dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/local_share/backward_filter/algo.cpp b/dnn/src/cuda/local_share/backward_filter/algo.cpp index 6f6981a8..0e7a2a46 100644 --- a/dnn/src/cuda/local_share/backward_filter/algo.cpp +++ b/dnn/src/cuda/local_share/backward_filter/algo.cpp @@ -48,14 +48,14 @@ std::string LocalShareBackwardFilterImpl::AlgoBase::SizeArgs::to_string() const { auto&& param = opr->param(); MEGDNN_MARK_USED_VAR(param); - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s, diff=%s, grad=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s->%s", src_layout.to_string().c_str(), diff_layout.to_string().c_str(), grad_layout.to_string().c_str(), param.pad_h, param.pad_w, param.stride_h, param.stride_w, param.dilate_h, param.dilate_w, static_cast(param.mode), src_layout.dtype.name(), - diff_layout.dtype.name(), grad_layout.dtype.name())); + diff_layout.dtype.name(), grad_layout.dtype.name()); } // vim: syntax=cpp.doxygen diff --git 
a/dnn/src/cuda/local_share/forward/algo.cpp b/dnn/src/cuda/local_share/forward/algo.cpp index 822c19ab..f915d70d 100644 --- a/dnn/src/cuda/local_share/forward/algo.cpp +++ b/dnn/src/cuda/local_share/forward/algo.cpp @@ -49,14 +49,14 @@ LocalShareForwardImpl::AlgoBase::ExecArgs::ExecArgs(LocalShareForwardImpl* opr, std::string LocalShareForwardImpl::AlgoBase::SizeArgs::to_string() const { auto&& param = opr->param(); MEGDNN_MARK_USED_VAR(param); - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s, filter=%s, dst=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", src_layout.to_string().c_str(), filter_layout.to_string().c_str(), dst_layout.to_string().c_str(), param.pad_h, param.pad_w, param.stride_h, param.stride_w, param.dilate_h, param.dilate_w, static_cast<int>(param.mode), src_layout.dtype.name(), - dst_layout.dtype.name())); + dst_layout.dtype.name()); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/local_share/opr_impl.cpp b/dnn/src/cuda/local_share/opr_impl.cpp index 93705f6a..f4d1f232 100644 --- a/dnn/src/cuda/local_share/opr_impl.cpp +++ b/dnn/src/cuda/local_share/opr_impl.cpp @@ -39,11 +39,11 @@ LocalShareForwardImpl::get_algorithm_heuristic(const TensorLayout& src, args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.batched_matmul; } - megdnn_throw(megdnn_mangle( + megdnn_throw( ssprintf("no %s local share conv algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ? "reproducible" : "usable", - args.to_string().c_str(), workspace_limit_in_bytes))); + args.to_string().c_str(), workspace_limit_in_bytes)); } std::vector @@ -89,11 +89,11 @@ LocalShareBackwardDataImpl::get_algorithm_heuristic( args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.batched_matmul; } - megdnn_throw(megdnn_mangle( + megdnn_throw( ssprintf("no %s local share bwd data algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ? "reproducible" : "usable", - args.to_string().c_str(), workspace_limit_in_bytes))); + args.to_string().c_str(), workspace_limit_in_bytes)); } std::vector @@ -139,11 +139,11 @@ LocalShareBackwardFilterImpl::get_algorithm_heuristic( args, reproducible, workspace_limit_in_bytes)) { return &sm_algo_pack.batched_matmul; } - megdnn_throw(megdnn_mangle( + megdnn_throw( ssprintf("no %s local share bwd filter algorithm with args(%s) and " "workspace limit (%zu bytes)", reproducible ?
"reproducible" : "usable", - args.to_string().c_str(), workspace_limit_in_bytes))); + args.to_string().c_str(), workspace_limit_in_bytes)); } std::vector diff --git a/dnn/src/cuda/matrix_mul/algos.cpp b/dnn/src/cuda/matrix_mul/algos.cpp index 490b32bd..57d8941b 100644 --- a/dnn/src/cuda/matrix_mul/algos.cpp +++ b/dnn/src/cuda/matrix_mul/algos.cpp @@ -122,11 +122,11 @@ std::string MatrixMulForwardImpl::AlgoBase::SizeArgs::to_string() const { MEGDNN_MARK_USED_VAR(m); MEGDNN_MARK_USED_VAR(n); MEGDNN_MARK_USED_VAR(k); - return megdnn_mangle(ssprintf( + return ssprintf( "A={%zux%zu},B={%zux%zu},C={%zux%zu},Transpose A=%d,Transpose " "B=%d,ldA=%zu,ldB=%zu,ldC=%zu", m, k, k, n, m, n, param.transposeA, param.transposeB, - layout_a.stride[0], layout_b.stride[0], layout_c.stride[0])); + layout_a.stride[0], layout_b.stride[0], layout_c.stride[0]); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/cuda/matrix_mul/cublasLt_wrapper.cpp b/dnn/src/cuda/matrix_mul/cublasLt_wrapper.cpp index 06849ffb..d79c7434 100644 --- a/dnn/src/cuda/matrix_mul/cublasLt_wrapper.cpp +++ b/dnn/src/cuda/matrix_mul/cublasLt_wrapper.cpp @@ -29,8 +29,7 @@ static cudaDataType_t to_cuda_dtype(DType tp) { case DTypeEnum::QuantizedS32: return CUDA_R_32I; default: - megdnn_throw(megdnn_mangle( - "dtype must be float16/float32/int8/qs8/int32")); + megdnn_throw("dtype must be float16/float32/int8/qs8/int32"); } } @@ -45,8 +44,7 @@ static cublasComputeType_t to_cublas_compute_type(DType tp) { case DTypeEnum::QuantizedS32: return CUBLAS_COMPUTE_32I; default: - megdnn_throw( - megdnn_mangle("dtype must be float16/float32/int32/Qs32")); + megdnn_throw("dtype must be float16/float32/int32/Qs32"); } } #endif @@ -62,8 +60,7 @@ static const char* cuda_type_to_str(cudaDataType_t tp) { case CUDA_R_32I: return "CUDA_R_32I"; default: - megdnn_throw( - megdnn_mangle("dtype must be float16/float32/int8/int32")); + megdnn_throw("dtype must be float16/float32/int8/int32"); } } @@ -77,8 +74,7 @@ static size_t cuda_dtype_size(cudaDataType_t dt) { case CUDA_R_32I: return 4_z; default: - megdnn_throw( - megdnn_mangle("dtype must be float16/float32/int8/int32")); + megdnn_throw("dtype must be float16/float32/int8/int32"); } } diff --git a/dnn/src/cuda/matrix_mul/cublas_lt.cpp b/dnn/src/cuda/matrix_mul/cublas_lt.cpp index 5a6598a7..111777d3 100644 --- a/dnn/src/cuda/matrix_mul/cublas_lt.cpp +++ b/dnn/src/cuda/matrix_mul/cublas_lt.cpp @@ -140,8 +140,7 @@ void MatrixMulForwardImpl::AlgoCuBlasLt::exec(const ExecArgs& args) const { igemm(); break; default: - megdnn_throw(megdnn_mangle( - "compute type must be float16/float32/int32")); + megdnn_throw("compute type must be float16/float32/int32"); } #else switch (desc.dt_compute) { @@ -155,8 +154,7 @@ void MatrixMulForwardImpl::AlgoCuBlasLt::exec(const ExecArgs& args) const { igemm(); break; default: - megdnn_throw(megdnn_mangle( - "compute type must be float16/float32/int32")); + megdnn_throw("compute type must be float16/float32/int32"); } #endif } diff --git a/dnn/src/cuda/megcore/cuda_computing_context.cpp b/dnn/src/cuda/megcore/cuda_computing_context.cpp index b0a7684c..434273a3 100644 --- a/dnn/src/cuda/megcore/cuda_computing_context.cpp +++ b/dnn/src/cuda/megcore/cuda_computing_context.cpp @@ -27,7 +27,8 @@ CUDAComputingContext::CUDAComputingContext(megcoreDeviceHandle_t dev_handle, { megcorePlatform_t platform; megcoreGetPlatform(dev_handle, &platform); - megdnn_assert(platform == megcorePlatformCUDA); + megdnn_throw_if(platform != megcorePlatformCUDA, megdnn_error, + "platform should be CUDA Platform"); 
if (own_stream_) { cuda_check(cudaStreamCreateWithFlags(&context_.stream, cudaStreamNonBlocking)); diff --git a/dnn/src/cuda/megcore/public_api/computing.cpp b/dnn/src/cuda/megcore/public_api/computing.cpp index f2f6e1b3..9b45d81c 100644 --- a/dnn/src/cuda/megcore/public_api/computing.cpp +++ b/dnn/src/cuda/megcore/public_api/computing.cpp @@ -38,9 +38,10 @@ megcoreStatus_t megcore::getCUDAContext(megcoreComputingHandle_t handle, megcoreDeviceHandle_t dev_handle = H->content->dev_handle(); megcorePlatform_t platform; megcoreGetPlatform(dev_handle, &platform); - megdnn_assert(platform == megcorePlatformCUDA); - auto context = static_cast<megcore::cuda::CUDAComputingContext*>( - H->content.get()); + megdnn_throw_if(platform != megcorePlatformCUDA, megdnn_error, + "platform should be CUDA Platform"); + auto context = + static_cast<megcore::cuda::CUDAComputingContext*>(H->content.get()); *ctx = context->context(); return megcoreSuccess; } diff --git a/dnn/src/cuda/relayout/opr_impl.cpp b/dnn/src/cuda/relayout/opr_impl.cpp index d78f7eb4..1e1adfdc 100644 --- a/dnn/src/cuda/relayout/opr_impl.cpp +++ b/dnn/src/cuda/relayout/opr_impl.cpp @@ -194,8 +194,8 @@ void RelayoutForwardImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_out dst, megcoreGetDeviceHandle(src_handle->megcore_computing_handle(), &dev); megcorePlatform_t plat; megcoreGetPlatform(dev, &plat); - megdnn_assert(plat == megcorePlatformCUDA, - "only relayout between cuda devices are supported"); + megdnn_throw_if(plat != megcorePlatformCUDA, megdnn_error, + "only relayout between cuda devices are supported"); int dst_dev_id = -1, src_dev_id = -1; megcoreGetDeviceID(dev, &src_dev_id); diff --git a/dnn/src/cuda/remap/backward_data.cu b/dnn/src/cuda/remap/backward_data.cu index b51f113c..72dd93e2 100644 --- a/dnn/src/cuda/remap/backward_data.cu +++ b/dnn/src/cuda/remap/backward_data.cu @@ -157,7 +157,7 @@ void backwarddata_proxy(ctype* grad, const float* map_xy, const ctype* diff, INST(ctype, NCHW, BORDER_WRAP) FOR_FORMAT_BMODE(float) -MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) +DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) #undef FOR_FORMAT_BMODE #undef INST diff --git a/dnn/src/cuda/remap/backward_mat.cu b/dnn/src/cuda/remap/backward_mat.cu index ecf6d9a4..3c69e99b 100644 --- a/dnn/src/cuda/remap/backward_mat.cu +++ b/dnn/src/cuda/remap/backward_mat.cu @@ -158,7 +158,7 @@ void backwardmat_proxy(const ctype* src, const float* map_xy, const ctype* diff, INST(ctype, NCHW, BORDER_WRAP) FOR_FORMAT_BMODE(float) -MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) +DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) #undef FOR_FORMAT_BMODE #undef INST diff --git a/dnn/src/cuda/remap/forward.cpp b/dnn/src/cuda/remap/forward.cpp index 29cedace..3ea90fe9 100644 --- a/dnn/src/cuda/remap/forward.cpp +++ b/dnn/src/cuda/remap/forward.cpp @@ -76,8 +76,8 @@ void RemapImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_out map_xy, switch (src.layout.dtype.enumv()) { support_dtype(dtype::Float32); - MEGDNN_INC_FLOAT16(support_dtype(dtype::Float16)); - MEGDNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); + DNN_INC_FLOAT16(support_dtype(dtype::Float16)); + DNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); support_dtype(dtype::Int8); support_dtype(dtype::Uint8); default: diff --git a/dnn/src/cuda/remap/forward.cu b/dnn/src/cuda/remap/forward.cu index ab429275..e7beadf9 100644 --- a/dnn/src/cuda/remap/forward.cu +++ b/dnn/src/cuda/remap/forward.cu @@ -209,8 +209,8 @@ void forward_proxy(const ctype* src, const float* map_xy, ctype* dst, int N, INST(ctype, NHWC, BORDER_WRAP) FOR_FORMAT_BMODE(float)
-MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_float16)) -MEGDNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) +DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_float16)) +DNN_INC_FLOAT16(FOR_FORMAT_BMODE(dt_bfloat16)) FOR_FORMAT_BMODE(int8_t) FOR_FORMAT_BMODE(uint8_t) diff --git a/dnn/src/cuda/resize/forward.cpp b/dnn/src/cuda/resize/forward.cpp index 4dce3ac0..23b0901f 100644 --- a/dnn/src/cuda/resize/forward.cpp +++ b/dnn/src/cuda/resize/forward.cpp @@ -43,8 +43,7 @@ void resize_cv_proxy(_megdnn_tensor_in src, _megdnn_tensor_out dst, src_mat.step(), dst_mat.step(), src_mat.channels(), imode, workspace, stream); } else { - megdnn_throw( - megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } } diff --git a/dnn/src/cuda/warp_affine/opr_impl.cpp b/dnn/src/cuda/warp_affine/opr_impl.cpp index 981e2f8b..5894332a 100644 --- a/dnn/src/cuda/warp_affine/opr_impl.cpp +++ b/dnn/src/cuda/warp_affine/opr_impl.cpp @@ -73,8 +73,7 @@ void warp_affine_cv_exec(_megdnn_tensor_in src, _megdnn_tensor_in mat, } } else { - megdnn_throw( - megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } trans_ptr += 2 * 3; diff --git a/dnn/src/cuda/warp_perspective/forward.cpp b/dnn/src/cuda/warp_perspective/forward.cpp index c9d6e88c..c674e4b1 100644 --- a/dnn/src/cuda/warp_perspective/forward.cpp +++ b/dnn/src/cuda/warp_perspective/forward.cpp @@ -75,8 +75,7 @@ void warp_perspective_cv_exec(_megdnn_tensor_in src, _megdnn_tensor_in mat, } } else { - megdnn_throw(megdnn_mangle( - "Unsupported datatype of WarpPerspective optr.")); + megdnn_throw("Unsupported datatype of WarpPerspective optr."); } trans_ptr += 3 * 3; @@ -215,7 +214,7 @@ void WarpPerspectiveForwardImpl::exec(_megdnn_tensor_in ssrc, C, IH, IW, OH, OW, bval, bmode, async_error_info(handle()), m_error_tracker, stream); - } else if (MEGDNN_FLOAT16_SELECT( + } else if (DNN_FLOAT16_SELECT( src.layout.dtype == dtype::Float16(), false)) { #ifndef MEGDNN_DISABLE_FLOAT16 diff --git a/dnn/src/fallback/batched_matrix_mul/algos.cpp b/dnn/src/fallback/batched_matrix_mul/algos.cpp index 3c83a927..25842047 100644 --- a/dnn/src/fallback/batched_matrix_mul/algos.cpp +++ b/dnn/src/fallback/batched_matrix_mul/algos.cpp @@ -50,11 +50,13 @@ std::string BatchedMatrixMulForwardImpl::AlgoBase::SizeArgs::to_string() const { MEGDNN_MARK_USED_VAR(m); MEGDNN_MARK_USED_VAR(n); MEGDNN_MARK_USED_VAR(k); - return megdnn_mangle(ssprintf( + return ssprintf( "A={%zux%zu},B={%zux%zu},C={%zux%zu},Transpose A=%d,Transpose " "B=%d,ldA=%zu,ldB=%zu,ldC=%zu", m, k, k, n, m, n, param.transposeA, param.transposeB, - layout_a.stride[0], layout_b.stride[0], layout_c.stride[0])); + static_cast<size_t>(layout_a.stride[0]), + static_cast<size_t>(layout_b.stride[0]), + static_cast<size_t>(layout_c.stride[0])); } /* ===================== default algo ===================== */ diff --git a/dnn/src/fallback/convolution/algos.cpp b/dnn/src/fallback/convolution/algos.cpp index 9633aff9..afb0f0f8 100644 --- a/dnn/src/fallback/convolution/algos.cpp +++ b/dnn/src/fallback/convolution/algos.cpp @@ -295,7 +295,7 @@ SmallVector ConvolutionImpl::AlgoNaive::dispatch_kern( cb(dtype::Int8, dtype::Int32); cb(dtype::Quantized8Asymm, dtype::QuantizedS32); cb(dtype::QuantizedS8, dtype::QuantizedS32); - megdnn_throw(megdnn_mangle("unknown convolution data type")); + megdnn_throw("unknown convolution data type"); #undef cb } @@ -596,8 +596,8 @@ ConvolutionBackwardDataImpl::AlgoMatrixMul::dispatch_kern( } \ } while (0); cb(dtype::Float32,
"FLOAT"_hash); - MEGDNN_INC_FLOAT16(cb(dtype::Float16, "FLOAT16"_hash)); - MEGDNN_INC_FLOAT16(cb(dtype::BFloat16, "BFLOAT16"_hash)); + DNN_INC_FLOAT16(cb(dtype::Float16, "FLOAT16"_hash)); + DNN_INC_FLOAT16(cb(dtype::BFloat16, "BFLOAT16"_hash)); #undef cb #define cb(dt_src, dt_dst, midout_tag) \ diff --git a/dnn/src/fallback/convolution/opr_impl.cpp b/dnn/src/fallback/convolution/opr_impl.cpp index 87dd3bc5..3fa87e16 100644 --- a/dnn/src/fallback/convolution/opr_impl.cpp +++ b/dnn/src/fallback/convolution/opr_impl.cpp @@ -432,7 +432,7 @@ ConvolutionImpl::NCBKernSizeParam::deduce_algo_data_type() const { } else if (src_type.enumv() == DTypeEnum::Quantized8Asymm) { return ConvolutionImpl::AlgoDataType::QUINT8X8X32; } else { - megdnn_throw(ssprintf("megdnn not support data type of %s * %s -> %s\n", + megdnn_throw(ssprintf("not support data type of %s * %s -> %s\n", src_type.name(), filter_type.name(), dst_type.name())); } @@ -697,8 +697,7 @@ ConvolutionBackwardDataImpl::ncb_1g_dispatch_kern( return static_cast(algo)->dispatch_kern(this, param); } - megdnn_throw( - megdnn_mangle("no suitable ConvolutionBackwardData algorithm")); + megdnn_throw("no suitable ConvolutionBackwardData algorithm"); } bool ConvolutionBackwardDataImpl::is_matrix_mul_preferred( diff --git a/dnn/src/fallback/convolution/run_conv.cpp b/dnn/src/fallback/convolution/run_conv.cpp index e2565d46..866996c9 100644 --- a/dnn/src/fallback/convolution/run_conv.cpp +++ b/dnn/src/fallback/convolution/run_conv.cpp @@ -134,8 +134,7 @@ void run_xcorr_single_channel_templated( DISPATCH(6) DISPATCH(7) #undef DISPATCH - megdnn_throw(megdnn_mangle( - "internal error in conv template dispatching: impossible")); + megdnn_throw("internal error in conv template dispatching: impossible"); } void run_xcorr_single_channel_nontemplated( @@ -339,8 +338,7 @@ void conv_backdata_single_channel_templated( DISPATCH(7) #undef DISPATCH megdnn_throw( - megdnn_mangle("internal error in conv_backdata template " - "dispatching: impossible")); + "internal error in conv_backdata template dispatching: impossible"); } void conv_backdata_single_channel_nontemplated( diff --git a/dnn/src/fallback/matrix_mul/algos.cpp b/dnn/src/fallback/matrix_mul/algos.cpp index cebf8957..0312070c 100644 --- a/dnn/src/fallback/matrix_mul/algos.cpp +++ b/dnn/src/fallback/matrix_mul/algos.cpp @@ -165,7 +165,7 @@ MatrixMulImpl::kern_t MatrixMulImpl::AlgoGemv::get_kern( } DISPATCH(Float32, Float32, (gemm_gemv_like), 0); - MEGDNN_INC_FLOAT16(DISPATCH(Float16, Float16, + DNN_INC_FLOAT16(DISPATCH(Float16, Float16, (gemm_gemv_like), 1)); DISPATCH(Int8, Int16, (gemm_gemv_like), 2); DISPATCH(Quantized8Asymm, QuantizedS32, diff --git a/dnn/src/fallback/matrix_mul/opr_impl.cpp b/dnn/src/fallback/matrix_mul/opr_impl.cpp index 25a47b80..999de05a 100644 --- a/dnn/src/fallback/matrix_mul/opr_impl.cpp +++ b/dnn/src/fallback/matrix_mul/opr_impl.cpp @@ -263,9 +263,8 @@ MatrixMulImpl::KernSizeParam::deduce_algo_data_type() const { } else if (A_type.enumv() == DTypeEnum::Int16) { return MatrixMulImpl::AlgoDataType::INT16X16X32; } else { - megdnn_throw(ssprintf( - "megdnn matmul not support data type of %s * %s -> %s\n", - A_type.name(), B_type.name(), C_type.name())); + megdnn_throw(ssprintf("matmul not support data type of %s * %s -> %s\n", + A_type.name(), B_type.name(), C_type.name())); } } diff --git a/dnn/src/fallback/powc/opr_impl.cpp b/dnn/src/fallback/powc/opr_impl.cpp index a8559159..f3e78307 100644 --- a/dnn/src/fallback/powc/opr_impl.cpp +++ b/dnn/src/fallback/powc/opr_impl.cpp @@ -262,10 
+262,10 @@ void PowCImpl::do_exec(_megdnn_tensor_in src, _megdnn_tensor_out dst, #if !MEGDNN_DISABLE_FLOAT16 case DTypeTrait<dtype::Float16>::enumv: #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - return MEGDNN_INC_FLOAT16( + return DNN_INC_FLOAT16( do_exec_ct<__fp16>(src, dst, exp_f, exp_i)); #else - return MEGDNN_INC_FLOAT16( + return DNN_INC_FLOAT16( do_exec_ct<dt_float16>(src, dst, exp_f, exp_i)); #endif #endif diff --git a/dnn/src/fallback/resize/opr_impl.cpp b/dnn/src/fallback/resize/opr_impl.cpp index f4802f99..792ab82b 100644 --- a/dnn/src/fallback/resize/opr_impl.cpp +++ b/dnn/src/fallback/resize/opr_impl.cpp @@ -133,7 +133,7 @@ void ResizeImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in dst, switch (src.layout.dtype.enumv()) { cb(dtype::Float32, float); - MEGDNN_INC_FLOAT16(cb(dtype::Float16, dt_float16)); + DNN_INC_FLOAT16(cb(dtype::Float16, dt_float16)); cb(dtype::Int8, int8_t); cb(dtype::QuantizedS8, int8_t); cb(dtype::Uint8, uint8_t); diff --git a/dnn/src/fallback/warp_perspective/opr_impl.cpp b/dnn/src/fallback/warp_perspective/opr_impl.cpp index cb76f343..7bf6588f 100644 --- a/dnn/src/fallback/warp_perspective/opr_impl.cpp +++ b/dnn/src/fallback/warp_perspective/opr_impl.cpp @@ -93,7 +93,7 @@ void WarpPerspectiveImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in mat, switch (src.layout.dtype.enumv()) { cb(dtype::Float32, float, float); - MEGDNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, float)); + DNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, float)); cb(dtype::Int8, int8_t, float); cb(dtype::QuantizedS8, int8_t, float); cb(dtype::Uint8, uint8_t, float); diff --git a/dnn/src/naive/batch_normalization/opr_impl.cpp b/dnn/src/naive/batch_normalization/opr_impl.cpp index c944d4d4..61036a07 100644 --- a/dnn/src/naive/batch_normalization/opr_impl.cpp +++ b/dnn/src/naive/batch_normalization/opr_impl.cpp @@ -224,7 +224,7 @@ void BNForwardImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in bn_scale, variance.layout, batch_mean.layout, batch_inv_variance.layout, dst.layout, workspace.size); - MEGDNN_INC_FLOAT16(if (src.layout.dtype == dtype::Float16() && + DNN_INC_FLOAT16(if (src.layout.dtype == dtype::Float16() && bn_scale.layout.dtype == dtype::Float32()) { MEGDNN_DISPATCH_CPU_KERN_OPR(({ using T0 = typename DTypeTrait<dtype::Float16>::ctype; @@ -285,7 +285,7 @@ void BNBackwardImpl::exec(_megdnn_tensor_in x_in, _megdnn_tensor_in dy_in, bn_scale.layout.total_nr_elems(), workspace.raw_ptr); - MEGDNN_INC_FLOAT16(if (x_in.layout.dtype == dtype::Float16() && + DNN_INC_FLOAT16(if (x_in.layout.dtype == dtype::Float16() && bn_scale.layout.dtype == dtype::Float32()) { MEGDNN_DISPATCH_CPU_KERN_OPR(({ using T0 = typename DTypeTrait<dtype::Float16>::ctype; diff --git a/dnn/src/naive/convolution/convolution.cpp b/dnn/src/naive/convolution/convolution.cpp index 54dc37aa..7f320c72 100644 --- a/dnn/src/naive/convolution/convolution.cpp +++ b/dnn/src/naive/convolution/convolution.cpp @@ -56,10 +56,10 @@ void ConvolutionForwardImpl::exec(_megdnn_tensor_in src, DISPATCH(Int8, Int16, dt_int8, dt_int16, dt_int16); DISPATCH(Int8, Int32, dt_int8, dt_int32, dt_int32); DISPATCH(QuantizedS8, QuantizedS32, dt_int8, dt_int32, dt_int32); - MEGDNN_INC_FLOAT16(DISPATCH_CMODE(Float16, Float16, dt_float16, + DNN_INC_FLOAT16(DISPATCH_CMODE(Float16, Float16, dt_float16, dt_float16, dt_float32, ComputeMode::FLOAT32)); - MEGDNN_INC_FLOAT16(DISPATCH_CMODE(BFloat16, BFloat16, dt_bfloat16, + DNN_INC_FLOAT16(DISPATCH_CMODE(BFloat16, BFloat16, dt_bfloat16, dt_bfloat16, dt_float32, ComputeMode::FLOAT32)); DISPATCH(Quantized8Asymm, QuantizedS32, dt_quint8, dt_qint32, diff --git
a/dnn/src/naive/convolution3d/convolution3d.cpp b/dnn/src/naive/convolution3d/convolution3d.cpp index 3ce93e22..aed017da 100644 --- a/dnn/src/naive/convolution3d/convolution3d.cpp +++ b/dnn/src/naive/convolution3d/convolution3d.cpp @@ -49,7 +49,7 @@ void Convolution3DForwardImpl::exec(_megdnn_tensor_in src, #undef cb break; case Param::DataType::FLOAT_IO16xC32: - MEGDNN_INC_FLOAT16(MEGDNN_DISPATCH_CPU_KERN( + DNN_INC_FLOAT16(MEGDNN_DISPATCH_CPU_KERN( static_cast<HandleImpl*>(handle()), convolution3d::forward< dt_float16 MEGDNN_COMMA dt_float16 MEGDNN_COMMA diff --git a/dnn/src/naive/group_local/opr_impl.cpp b/dnn/src/naive/group_local/opr_impl.cpp index d1f1a0aa..0a8ceba6 100644 --- a/dnn/src/naive/group_local/opr_impl.cpp +++ b/dnn/src/naive/group_local/opr_impl.cpp @@ -149,19 +149,19 @@ void GroupLocalForwardImpl::exec(_megdnn_tensor_in src, dst.ptr<dt_float32>(), N, IC, IH, IW, FH, FW, OC, OH, OW, group, param().pad_h, param().pad_w, param().stride_h, param().stride_w)); - } else if (MEGDNN_FLOAT16_SELECT( + } else if (DNN_FLOAT16_SELECT( src.layout.dtype == dtype::Float16() && filter.layout.dtype == dtype::Float16() && dst.layout.dtype == dtype::Float16(), false)) { - MEGDNN_INC_FLOAT16(MEGDNN_DISPATCH_CPU_KERN_OPR(forward( + DNN_INC_FLOAT16(MEGDNN_DISPATCH_CPU_KERN_OPR(forward( src.ptr<dt_float16>(), filter.ptr<dt_float16>(), dst.ptr<dt_float16>(), N, IC, IH, IW, FH, FW, OC, OH, OW, group, param().pad_h, param().pad_w, param().stride_h, param().stride_w));); } else { - megdnn_assert_internal(false); + megdnn_assert_internal(false); } } diff --git a/dnn/src/naive/indexing_multi_axis_vec/opr_impl.cpp b/dnn/src/naive/indexing_multi_axis_vec/opr_impl.cpp index e52b8d56..0e1b1115 100644 --- a/dnn/src/naive/indexing_multi_axis_vec/opr_impl.cpp +++ b/dnn/src/naive/indexing_multi_axis_vec/opr_impl.cpp @@ -90,7 +90,7 @@ void dispatch_exec(HandleImpl *handle, MEGDNN_FOREACH_COMPUTING_DTYPE(cb) cb(::megdnn::dtype::Bool) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } diff --git a/dnn/src/naive/indexing_one_hot/opr_impl.cpp b/dnn/src/naive/indexing_one_hot/opr_impl.cpp index b4c6a8ac..092b579e 100644 --- a/dnn/src/naive/indexing_one_hot/opr_impl.cpp +++ b/dnn/src/naive/indexing_one_hot/opr_impl.cpp @@ -99,7 +99,7 @@ void IndexingOneHotForwardImpl::exec( MEGDNN_FOREACH_COMPUTING_DTYPE(cb) cb(megdnn::dtype::Quantized8Asymm) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } @@ -122,7 +122,7 @@ void IndexingSetOneHotForwardImpl::exec( MEGDNN_FOREACH_COMPUTING_DTYPE(cb) cb(megdnn::dtype::Quantized8Asymm) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } diff --git a/dnn/src/naive/local/local.cpp b/dnn/src/naive/local/local.cpp index 1b70afaa..abe8aa82 100644 --- a/dnn/src/naive/local/local.cpp +++ b/dnn/src/naive/local/local.cpp @@ -28,8 +28,8 @@ LocalForwardImpl::dispatch_float_noncontig_batch( } else { return &naive_kern; } - } else if (MEGDNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), false)) { - MEGDNN_INC_FLOAT16( + } else if (DNN_FLOAT16_SELECT(src.dtype == dtype::Float16(), false)) { + DNN_INC_FLOAT16( megdnn_assert(src.dtype == dtype::Float16()); if (param().mode == Mode::CROSS_CORRELATION) { return &naive_kern; diff --git a/dnn/src/naive/pooling/opr_impl.cpp b/dnn/src/naive/pooling/opr_impl.cpp index 834649b8..ed978826 100644 --- a/dnn/src/naive/pooling/opr_impl.cpp +++ b/dnn/src/naive/pooling/opr_impl.cpp @@ -503,7 +503,7 @@ WorkspaceBundle PoolingBackwardImpl::get_workspace_bundle( TensorLayout fdiff = diff;
TensorLayout fgrad = grad; auto get_workspace = [&sizes](TensorLayout& layout) { - if (MEGDNN_FLOAT16_SELECT(layout.dtype == dtype::BFloat16(), false)) { + if (DNN_FLOAT16_SELECT(layout.dtype == dtype::BFloat16(), false)) { layout.dtype = dtype::Float32(); sizes.push_back(layout.span().dist_byte()); } diff --git a/dnn/src/naive/reduce/opr_impl.cpp b/dnn/src/naive/reduce/opr_impl.cpp index d59bbcbf..cc68e179 100644 --- a/dnn/src/naive/reduce/opr_impl.cpp +++ b/dnn/src/naive/reduce/opr_impl.cpp @@ -117,64 +117,64 @@ template <> void reduce_fwd(const dt_quint8* __restrict, dt_quint8* __restrict, size_t, size_t, size_t) { megdnn_throw( - megdnn_mangle("Reduce (SUM) with DEFAULT DataType is not supported " - "on Quantized8Asymm")); + "Reduce (SUM) with DEFAULT DataType is not supported " + "on Quantized8Asymm"); } template <> void reduce_fwd(const dt_quint8* __restrict, dt_quint8* __restrict, size_t, size_t, size_t) { megdnn_throw( - megdnn_mangle("Reduce (MEAN) with DEFAULT DataType is not supported " - "on Quantized8Asymm")); + "Reduce (MEAN) with DEFAULT DataType is not supported " + "on Quantized8Asymm"); } template <> void reduce_fwd(const dt_quint8* __restrict, dt_quint8* __restrict, size_t, size_t, size_t) { - megdnn_throw(megdnn_mangle( + megdnn_throw( "Reduce (SUM_SQR) with DEFAULT DataType is not supported " - "on Quantized8Asymm")); + "on Quantized8Asymm"); } template <> void reduce_fwd(const dt_quint8* __restrict, dt_quint8* __restrict, size_t, size_t, size_t) { - megdnn_throw(megdnn_mangle( + megdnn_throw( "Reduce (PRODUCT) with DEFAULT DataType is not supported " - "on Quantized8Asymm")); + "on Quantized8Asymm"); } template <> void reduce_fwd(const dt_qint8* __restrict, dt_qint8* __restrict, size_t, size_t, size_t) { megdnn_throw( - megdnn_mangle("Reduce (SUM) with DEFAULT DataType is not supported " - "on QuantizedS8")); + "Reduce (SUM) with DEFAULT DataType is not supported " + "on QuantizedS8"); } template <> void reduce_fwd(const dt_qint8* __restrict, dt_qint8* __restrict, size_t, size_t, size_t) { megdnn_throw( - megdnn_mangle("Reduce (MEAN) with DEFAULT DataType is not supported " - "on QuantizedS8")); + "Reduce (MEAN) with DEFAULT DataType is not supported " + "on QuantizedS8"); } template <> void reduce_fwd(const dt_qint8* __restrict, dt_qint8* __restrict, size_t, size_t, size_t) { - megdnn_throw(megdnn_mangle( + megdnn_throw( "Reduce (SUM_SQR) with DEFAULT DataType is not supported " - "on QuantizedS8")); + "on QuantizedS8"); } template <> void reduce_fwd(const dt_qint8* __restrict, dt_qint8* __restrict, size_t, size_t, size_t) { - megdnn_throw(megdnn_mangle( + megdnn_throw( "Reduce (PRODUCT) with DEFAULT DataType is not supported " - "on QuantizedS8")); + "on QuantizedS8"); } template diff --git a/dnn/src/naive/relayout_format/opr_impl.cpp b/dnn/src/naive/relayout_format/opr_impl.cpp index f226ea58..62f10b08 100644 --- a/dnn/src/naive/relayout_format/opr_impl.cpp +++ b/dnn/src/naive/relayout_format/opr_impl.cpp @@ -426,7 +426,7 @@ void RelayoutFormatImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_out dst, break; \ } cb(Float32, dt_float32); - MEGDNN_INC_FLOAT16(cb(Float16, dt_float16)); + DNN_INC_FLOAT16(cb(Float16, dt_float16)); cb(Quantized8Asymm, dt_uint8); cb(QuantizedS8, dt_int8); #undef cb diff --git a/dnn/src/naive/remap/opr_impl.cpp b/dnn/src/naive/remap/opr_impl.cpp index f03a147c..76f2e711 100644 --- a/dnn/src/naive/remap/opr_impl.cpp +++ b/dnn/src/naive/remap/opr_impl.cpp @@ -267,8 +267,8 @@ void RemapImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in 
map_xy, } support_dtype(dtype::Float32); - MEGDNN_INC_FLOAT16(support_dtype(dtype::Float16)); - MEGDNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); + DNN_INC_FLOAT16(support_dtype(dtype::Float16)); + DNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); support_dtype(dtype::Int8); support_dtype(dtype::Uint8); #undef cb @@ -321,7 +321,7 @@ void RemapBackwardDataImpl::exec(_megdnn_tensor_in map_xy, } support_dtype(dtype::Float32); - MEGDNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); + DNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); #undef cb #undef support_dtype @@ -374,7 +374,7 @@ void RemapBackwardMatImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in map_xy, } support_dtype(dtype::Float32); - MEGDNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); + DNN_INC_FLOAT16(support_dtype(dtype::BFloat16)); #undef cb #undef support_dtype diff --git a/dnn/src/naive/resize/opr_impl.cpp b/dnn/src/naive/resize/opr_impl.cpp index a13df34c..4feed309 100644 --- a/dnn/src/naive/resize/opr_impl.cpp +++ b/dnn/src/naive/resize/opr_impl.cpp @@ -58,8 +58,8 @@ ResizeImpl::KernParam ResizeImpl::KernParam::from_tensors( ret.ow = dst.layout.shape[3]; } if (src.layout.dtype.enumv() == DTypeEnum::Float32 || - MEGDNN_FLOAT16_SELECT(src.layout.dtype.enumv() == DTypeEnum::Float16, - false) || + DNN_FLOAT16_SELECT(src.layout.dtype.enumv() == DTypeEnum::Float16, + false) || src.layout.dtype.enumv() == DTypeEnum::Int8 || src.layout.dtype.enumv() == DTypeEnum::Uint8 || src.layout.dtype.enumv() == DTypeEnum::QuantizedS8 || @@ -283,7 +283,7 @@ void ResizeImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in dst, switch (src.layout.dtype.enumv()) { cb(dtype::Float32, float, 0); - MEGDNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, 1)); + DNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, 1)); cb(dtype::Int8, int8_t, 2); cb(dtype::QuantizedS8, int8_t, 3); cb(dtype::Uint8, uint8_t, 4); diff --git a/dnn/src/naive/resize/resize_cv.cpp b/dnn/src/naive/resize/resize_cv.cpp index 7bb06b70..d9bd079f 100644 --- a/dnn/src/naive/resize/resize_cv.cpp +++ b/dnn/src/naive/resize/resize_cv.cpp @@ -1422,7 +1422,7 @@ void megdnn::naive::resize_cv_exec(_megdnn_tensor_in src, } MIDOUT_END(); } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of resize optr.")); + megdnn_throw("Unsupported datatype of resize optr."); } } } diff --git a/dnn/src/naive/separable_filter/opr_impl.cpp b/dnn/src/naive/separable_filter/opr_impl.cpp index 1e74b67f..aaf94f0f 100644 --- a/dnn/src/naive/separable_filter/opr_impl.cpp +++ b/dnn/src/naive/separable_filter/opr_impl.cpp @@ -180,7 +180,7 @@ void SeparableFilterForwardImpl::exec_internal(_megdnn_tensor_in src, cb(BORDER_ISOLATED); #undef cb default: - megdnn_throw(megdnn_mangle("Unexpected border mode")); + megdnn_throw("Unexpected border mode"); } } diff --git a/dnn/src/naive/warp_affine/opr_impl.cpp b/dnn/src/naive/warp_affine/opr_impl.cpp index 87a9f415..8a28cff0 100644 --- a/dnn/src/naive/warp_affine/opr_impl.cpp +++ b/dnn/src/naive/warp_affine/opr_impl.cpp @@ -259,7 +259,7 @@ void WarpAffineImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in mat, switch (src.layout.dtype.enumv()) { cb(dtype::Float32, float, float, 0); - MEGDNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, dt_float16, 1)); + DNN_INC_FLOAT16(cb(dtype::Float16, dt_float16, dt_float16, 1)); cb(dtype::Int8, int8_t, float, 2); cb(dtype::QuantizedS8, int8_t, float, 3); cb(dtype::Uint8, uint8_t, float, 4); diff --git a/dnn/src/naive/warp_affine/opr_impl.h b/dnn/src/naive/warp_affine/opr_impl.h index a7ce86de..25b0e67b 100644 --- 
a/dnn/src/naive/warp_affine/opr_impl.h +++ b/dnn/src/naive/warp_affine/opr_impl.h @@ -53,7 +53,7 @@ public: ret.ow = dst.layout.shape[2]; } if (src.layout.dtype.enumv() == DTypeEnum::Float32 || - MEGDNN_FLOAT16_SELECT( + DNN_FLOAT16_SELECT( src.layout.dtype.enumv() == DTypeEnum::Float16, false) || src.layout.dtype.enumv() == DTypeEnum::Int8 || diff --git a/dnn/src/naive/warp_affine/warp_affine_cv.cpp b/dnn/src/naive/warp_affine/warp_affine_cv.cpp index 3ab1f4e4..d16f15e4 100644 --- a/dnn/src/naive/warp_affine/warp_affine_cv.cpp +++ b/dnn/src/naive/warp_affine/warp_affine_cv.cpp @@ -213,7 +213,7 @@ void megdnn::naive::warp_affine_cv_exec(_megdnn_tensor_in src, DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } diff --git a/dnn/src/naive/warp_perspective/opr_impl.cpp b/dnn/src/naive/warp_perspective/opr_impl.cpp index 701562d7..c68b25e4 100644 --- a/dnn/src/naive/warp_perspective/opr_impl.cpp +++ b/dnn/src/naive/warp_perspective/opr_impl.cpp @@ -467,9 +467,9 @@ void WarpPerspectiveForwardImpl::exec(_megdnn_tensor_in src, DISPATCH_ST(dtype::Quantized8Asymm, uint8_t, float, KERN_CD4); DISPATCH_ST(dtype::QuantizedS8, int8_t, float, KERN_CD4); - MEGDNN_INC_FLOAT16( + DNN_INC_FLOAT16( DISPATCH_ST_MT(dtype::Float16, dt_float16, KERN_CD4)); - MEGDNN_INC_FLOAT16( + DNN_INC_FLOAT16( DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16, KERN_CD4)); megdnn_throw(ssprintf("Unsupported input DType in " "WarpPerspective: %s", @@ -560,8 +560,8 @@ void WarpPerspectiveForwardImpl::exec(_megdnn_tensor_in src, DISPATCH_ST(dtype::Uint8, uint8_t, float, KERN); DISPATCH_ST(dtype::Quantized8Asymm, uint8_t, float, KERN); - MEGDNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::Float16, dt_float16, KERN)); - MEGDNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16, KERN)); + DNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::Float16, dt_float16, KERN)); + DNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16, KERN)); megdnn_throw(ssprintf("Unsupported input DType in " "WarpPerspective: %s", src.layout.dtype.name()) @@ -660,7 +660,7 @@ void WarpPerspectiveBackwardDataImpl::exec(_megdnn_tensor_in mat, } \ } DISPATCH_ST_MT(dtype::Float32, dt_float32); - MEGDNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16)); + DNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16)); megdnn_throw(ssprintf("Unsupported input DType in " "WarpPerspective: %s", diff.layout.dtype.name()) @@ -801,7 +801,7 @@ void WarpPerspectiveBackwardMatImpl::exec(_megdnn_tensor_in src, } \ } DISPATCH_ST_MT(dtype::Float32, dt_float32); - MEGDNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16)); + DNN_INC_FLOAT16(DISPATCH_ST_MT(dtype::BFloat16, dt_bfloat16)); megdnn_throw(ssprintf("Unsupported input DType in " "WarpPerspective: %s", diff.layout.dtype.name()) diff --git a/dnn/src/naive/warp_perspective/opr_impl.h b/dnn/src/naive/warp_perspective/opr_impl.h index 01967b26..a3a97d88 100644 --- a/dnn/src/naive/warp_perspective/opr_impl.h +++ b/dnn/src/naive/warp_perspective/opr_impl.h @@ -88,7 +88,7 @@ protected: ret.ow = dst.layout.shape[3]; } if ((src.layout.dtype.enumv() == DTypeEnum::Float32 || - MEGDNN_FLOAT16_SELECT( + DNN_FLOAT16_SELECT( (src.layout.dtype.enumv() == DTypeEnum::Float16 || src.layout.dtype.enumv() == DTypeEnum::BFloat16), false) || diff --git a/dnn/src/naive/warp_perspective/warp_perspective_cv.cpp b/dnn/src/naive/warp_perspective/warp_perspective_cv.cpp index eae9af10..251f9ebd 100644 --- 
a/dnn/src/naive/warp_perspective/warp_perspective_cv.cpp +++ b/dnn/src/naive/warp_perspective/warp_perspective_cv.cpp @@ -231,8 +231,7 @@ void megdnn::naive::warp_perspective_cv_exec( DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw( - megdnn_mangle("Unsupported datatype of WarpPerspective optr.")); + megdnn_throw("Unsupported datatype of WarpPerspective optr."); } } diff --git a/dnn/src/rocm/add_update/opr_impl.cpp b/dnn/src/rocm/add_update/opr_impl.cpp index 588ff758..bc4d56ae 100644 --- a/dnn/src/rocm/add_update/opr_impl.cpp +++ b/dnn/src/rocm/add_update/opr_impl.cpp @@ -40,7 +40,7 @@ void AddUpdateForwardImpl::exec(_megdnn_tensor_inout dest, #undef cb default: - megdnn_throw(megdnn_mangle("unsupported dtype for AddUpdate")); + megdnn_throw("unsupported dtype for AddUpdate"); } } @@ -59,7 +59,7 @@ void AddUpdateForwardImpl::exec_noncontig(_megdnn_tensor_inout dest, #undef cb default: - megdnn_throw(megdnn_mangle("unsupported dtype for AddUpdate")); + megdnn_throw("unsupported dtype for AddUpdate"); } } diff --git a/dnn/src/rocm/argmxx/opr_impl.cpp b/dnn/src/rocm/argmxx/opr_impl.cpp index 85e94b1d..992406f7 100644 --- a/dnn/src/rocm/argmxx/opr_impl.cpp +++ b/dnn/src/rocm/argmxx/opr_impl.cpp @@ -83,8 +83,7 @@ void ArgmaxForwardImpl::exec(_megdnn_tensor_in src, } MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb) #undef cb - megdnn_throw(megdnn_mangle(ssprintf("Unsupported DType: %s", - src.layout.dtype.name()))); + megdnn_throw(ssprintf("Unsupported DType: %s", src.layout.dtype.name())); } size_t ArgminForwardImpl::get_workspace_in_bytes(const TensorLayout &src, @@ -119,8 +118,7 @@ void ArgminForwardImpl::exec(_megdnn_tensor_in src, } MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb) #undef cb - megdnn_throw(megdnn_mangle(ssprintf("Unsupported DType: %s", - src.layout.dtype.name()))); + megdnn_throw(ssprintf("Unsupported DType: %s", src.layout.dtype.name())); } } // namespace rocm diff --git a/dnn/src/rocm/batch_normalization/opr_impl.cpp b/dnn/src/rocm/batch_normalization/opr_impl.cpp index f23ebb27..90f9213b 100644 --- a/dnn/src/rocm/batch_normalization/opr_impl.cpp +++ b/dnn/src/rocm/batch_normalization/opr_impl.cpp @@ -36,8 +36,7 @@ void BNTensorDescHolder::setup(const TensorLayout& x, bn_mode = miopenBNSpatial; break; default: - megdnn_throw(megdnn_mangle( - "Unknown param dim type of batch normalization.")); + megdnn_throw("Unknown param dim type of batch normalization."); } xy_desc.set(TensorLayout(xy_shape, x.dtype)); param_desc.set(xy_desc.desc, bn_mode); @@ -82,8 +81,7 @@ void BNForwardImpl::exec(_megdnn_tensor_in src, _megdnn_tensor_in bn_scale, m_param.epsilon)); break; default: - megdnn_throw(megdnn_mangle( - "Unknown forward mode type of batch normalization.")); + megdnn_throw("Unknown forward mode type of batch normalization."); } } diff --git a/dnn/src/rocm/batched_matrix_mul/algos.cpp b/dnn/src/rocm/batched_matrix_mul/algos.cpp index 4bb75e5c..6bec8e65 100644 --- a/dnn/src/rocm/batched_matrix_mul/algos.cpp +++ b/dnn/src/rocm/batched_matrix_mul/algos.cpp @@ -49,11 +49,11 @@ std::string BatchedMatrixMulForwardImpl::AlgoBase::SizeArgs::to_string() const { MEGDNN_MARK_USED_VAR(m); MEGDNN_MARK_USED_VAR(n); MEGDNN_MARK_USED_VAR(k); - return megdnn_mangle(ssprintf( + return ssprintf( "A={%zux%zu},B={%zux%zu},C={%zux%zu},Transpose A=%d,Transpose " "B=%d,ldA=%zu,ldB=%zu,ldC=%zu", m, k, k, n, m, n, param.transposeA, param.transposeB, - layout_a.stride[0], layout_b.stride[0], layout_c.stride[0])); + layout_a.stride[0], layout_b.stride[0], layout_c.stride[0]); } // vim: 
syntax=cpp.doxygen diff --git a/dnn/src/rocm/convolution/backward_data/algo.cpp b/dnn/src/rocm/convolution/backward_data/algo.cpp index 9b131bb4..6f0ba595 100644 --- a/dnn/src/rocm/convolution/backward_data/algo.cpp +++ b/dnn/src/rocm/convolution/backward_data/algo.cpp @@ -59,14 +59,14 @@ ConvolutionBackwardDataImpl::AlgoBase::ExecArgs::ExecArgs( std::string ConvolutionBackwardDataImpl::AlgoBase::SizeArgs::to_string() const { auto&& fm = filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( + return ssprintf( "filter=%u{%u,%u,%u,%u}, diff=%s, grad=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], diff_layout->to_string().c_str(), grad_layout->to_string().c_str(), fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip, - diff_layout->dtype.name(), grad_layout->dtype.name())); + diff_layout->dtype.name(), grad_layout->dtype.name()); } convolution::MIOpenCacheKey diff --git a/dnn/src/rocm/convolution/backward_filter/algo.cpp b/dnn/src/rocm/convolution/backward_filter/algo.cpp index 5265ca4a..304913e0 100644 --- a/dnn/src/rocm/convolution/backward_filter/algo.cpp +++ b/dnn/src/rocm/convolution/backward_filter/algo.cpp @@ -62,14 +62,14 @@ std::string ConvolutionBackwardFilterImpl::AlgoBase::SizeArgs::to_string() const { auto&& fm = grad_filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s diff=%s grad_filter=%u{%u,%u,%u,%u}, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", src_layout->to_string().c_str(), diff_layout->to_string().c_str(), fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip, - src_layout->dtype.name(), diff_layout->dtype.name())); + src_layout->dtype.name(), diff_layout->dtype.name()); } convolution::MIOpenCacheKey diff --git a/dnn/src/rocm/convolution/forward/algo.cpp b/dnn/src/rocm/convolution/forward/algo.cpp index d45d0c25..2aed24f4 100644 --- a/dnn/src/rocm/convolution/forward/algo.cpp +++ b/dnn/src/rocm/convolution/forward/algo.cpp @@ -65,14 +65,14 @@ ConvolutionForwardImpl::AlgoBase::ExecArgs::ExecArgs( std::string ConvolutionForwardImpl::AlgoBase::SizeArgs::to_string() const { auto&& fm = filter_meta; MEGDNN_MARK_USED_VAR(fm); - return megdnn_mangle(ssprintf( + return ssprintf( "src=%s, filter=%u{%u,%u,%u,%u}, dst=%s, " "pad=%ux%u, stride=%ux%u, dilate=%ux%u, xcorr=%d, dtype=%s,%s", src_layout->to_string().c_str(), fm.group, fm.ocpg, fm.icpg, fm.spatial[0], fm.spatial[1], dst_layout->to_string().c_str(), fm.padding[0], fm.padding[1], fm.stride[0], fm.stride[1], fm.dilation[0], fm.dilation[1], !fm.should_flip, - src_layout->dtype.name(), dst_layout->dtype.name())); + src_layout->dtype.name(), dst_layout->dtype.name()); } convolution::MIOpenCacheKey diff --git a/dnn/src/rocm/indexing_multi_axis_vec/opr_impl.cpp b/dnn/src/rocm/indexing_multi_axis_vec/opr_impl.cpp index 68605b0f..4a5e688e 100644 --- a/dnn/src/rocm/indexing_multi_axis_vec/opr_impl.cpp +++ b/dnn/src/rocm/indexing_multi_axis_vec/opr_impl.cpp @@ -199,9 +199,9 @@ size_t IndexingIncrMultiAxisVecImpl::get_workspace_in_bytes( void IndexingIncrMultiAxisVecImpl::exec( _megdnn_tensor_inout data, _megdnn_tensor_in value, const IndexDesc &index, _megdnn_workspace workspace) { - MEGDNN_INC_FLOAT16( + DNN_INC_FLOAT16( megdnn_assert(data.layout.dtype != dtype::Float16(), - "float16 incr on hip currently not 
supported")); + "float16 incr on hip currently not supported")); auto info = check_exec(data.layout, value.layout, index, workspace.size); info.error_tracker = m_error_tracker; info.error_info = async_error_info(handle()); diff --git a/dnn/src/rocm/indexing_one_hot/opr_impl.cpp b/dnn/src/rocm/indexing_one_hot/opr_impl.cpp index 57d9af45..3c1cf5df 100644 --- a/dnn/src/rocm/indexing_one_hot/opr_impl.cpp +++ b/dnn/src/rocm/indexing_one_hot/opr_impl.cpp @@ -54,7 +54,7 @@ void IndexingOneHotForwardImpl::exec( switch (src.layout.dtype.enumv()) { MEGDNN_FOREACH_COMPUTING_DTYPE(cb) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } @@ -81,7 +81,7 @@ void IndexingSetOneHotForwardImpl::exec( switch (data.layout.dtype.enumv()) { MEGDNN_FOREACH_COMPUTING_DTYPE(cb) default: - megdnn_throw(megdnn_mangle("bad dtype")); + megdnn_throw("bad dtype"); } #undef cb } diff --git a/dnn/src/rocm/matrix_mul/algos.cpp b/dnn/src/rocm/matrix_mul/algos.cpp index aa0076e8..5eb45ad6 100644 --- a/dnn/src/rocm/matrix_mul/algos.cpp +++ b/dnn/src/rocm/matrix_mul/algos.cpp @@ -52,11 +52,11 @@ std::string MatrixMulForwardImpl::AlgoBase::SizeArgs::to_string() const { MEGDNN_MARK_USED_VAR(m); MEGDNN_MARK_USED_VAR(n); MEGDNN_MARK_USED_VAR(k); - return megdnn_mangle(ssprintf( + return ssprintf( "A={%zux%zu},B={%zux%zu},C={%zux%zu},Transpose A=%d,Transpose " "B=%d,ldA=%zu,ldB=%zu,ldC=%zu", m, k, k, n, m, n, param.transposeA, param.transposeB, - layout_a.stride[0], layout_b.stride[0], layout_c.stride[0])); + layout_a.stride[0], layout_b.stride[0], layout_c.stride[0]); } // vim: syntax=cpp.doxygen diff --git a/dnn/src/rocm/miopen_wrapper.cpp b/dnn/src/rocm/miopen_wrapper.cpp index 7f488632..2d4249f0 100644 --- a/dnn/src/rocm/miopen_wrapper.cpp +++ b/dnn/src/rocm/miopen_wrapper.cpp @@ -37,8 +37,7 @@ miopenDataType_t to_miopen_dtype(DType type, case DTypeEnum::Int8: return miopenInt8; default: - megdnn_throw( - megdnn_mangle("dtype must be float16/float32/int8/int32")); + megdnn_throw("dtype must be float16/float32/int8/int32"); } } } // namespace @@ -84,8 +83,7 @@ void ConvDesc::set(const param::Convolution& param, const size_t nr_group, mode = is_depthwise ? 
miopenDepthwise : miopenGroupConv; } } else { - megdnn_throw(megdnn_mangle( - "for now, miopen do not support non xcorr convolution")); + megdnn_throw("for now, miopen does not support non-xcorr convolution"); miopen_check(miopenInitConvolutionDescriptor( @@ -119,7 +117,7 @@ void PoolingDesc::set(const param::Pooling& param) { mode = miopenPoolingAverageInclusive; break; default: - megdnn_throw(megdnn_mangle("Unsupported pooling mode for miopen")); + megdnn_throw("Unsupported pooling mode for miopen"); } miopen_check(miopenSet2dPoolingDescriptor( desc, mode, param.window_h, param.window_w, param.pad_h, diff --git a/dnn/src/x86/conv_bias/f32/algos.cpp b/dnn/src/x86/conv_bias/f32/algos.cpp index 543147f0..714e66d2 100644 --- a/dnn/src/x86/conv_bias/f32/algos.cpp +++ b/dnn/src/x86/conv_bias/f32/algos.cpp @@ -187,15 +187,15 @@ void ConvBiasImpl::AlgoDirect::copy_padding_kern( } } }; -#define DISPATCH                                                  \ -    if (is_supported(SIMDType::FMA)) {                            \ -        DISPATCH_SIMD(fma)                                        \ -    } else if (is_supported(SIMDType::AVX)) {                     \ -        DISPATCH_SIMD(avx)                                        \ -    } else if (is_supported(SIMDType::SSE)) {                     \ -        DISPATCH_SIMD(sse)                                        \ -    } else {                                                      \ -        megdnn_throw(megdnn_mangle("no fma/avx/sse detected"));   \ +#define DISPATCH                                          \ +    if (is_supported(SIMDType::FMA)) {                    \ +        DISPATCH_SIMD(fma)                                \ +    } else if (is_supported(SIMDType::AVX)) {             \ +        DISPATCH_SIMD(avx)                                \ +    } else if (is_supported(SIMDType::SSE)) {             \ +        DISPATCH_SIMD(sse)                                \ +    } else {                                              \ +        megdnn_throw("no fma/avx/sse detected");          \ } #define DISPATCH_SIMD(simd) \ @@ -205,31 +205,31 @@ void ConvBiasImpl::AlgoDirect::copy_padding_kern( DISPATCH_SIMD_MODE(simd, conv) \ } -#define DISPATCH_SIMD_MODE(simd, mode)                            \ -    switch (FH) {                                                 \ -        case 1:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 1);              \ -            break;                                                \ -        case 2:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 2);              \ -            break;                                                \ -        case 3:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 3);              \ -            break;                                                \ -        case 4:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 4);              \ -            break;                                                \ -        case 5:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 5);              \ -            break;                                                \ -        case 6:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 6);              \ -            break;                                                \ -        case 7:                                                   \ -            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 7);              \ -            break;                                                \ -        default:                                                  \ -            megdnn_throw(megdnn_mangle("unsupported filter size")); \ +#define DISPATCH_SIMD_MODE(simd, mode)                    \ +    switch (FH) {                                         \ +        case 1:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 1);      \ +            break;                                        \ +        case 2:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 2);      \ +            break;                                        \ +        case 3:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 3);      \ +            break;                                        \ +        case 4:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 4);      \ +            break;                                        \ +        case 5:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 5);      \ +            break;                                        \ +        case 6:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 6);      \ +            break;                                        \ +        case 7:                                           \ +            DISPATCH_SIMD_MODE_FSIZE(simd, mode, 7);      \ +            break;                                        \ +        default:                                          \ +            megdnn_throw("unsupported filter size");      \ } #define DISPATCH_SIMD_MODE_FSIZE(simd, mode, fsize) \ diff --git a/dnn/src/x86/elemwise/opr_impl.cpp b/dnn/src/x86/elemwise/opr_impl.cpp index 8df7155d..f337c1ae 100644 --- a/dnn/src/x86/elemwise/opr_impl.cpp +++ b/dnn/src/x86/elemwise/opr_impl.cpp @@ -29,9 +29,6 @@ void check_mkl_error(const char* func) { MEGDNN_MARK_USED_VAR(func); int err = vmlClearErrStatus(); if (err != VML_STATUS_OK) { -#if MEGDNN_ENABLE_MANGLING - megdnn_throw("mkl error"); -#else const char* name; switch (err) { #define ON(x) \ @@ -51,10 +48,9 @@ void check_mkl_error(const char* func) { } MEGDNN_MARK_USED_VAR(name); megdnn_throw( - ssprintf("MKL func %s reported error: code=%d(%s); " "possibly due to input data corruption.", + ssprintf("MKL func %s reported 
error: code=%d(%s); possibly due " "to input data corruption.", func, err, name)); -#endif } } #endif diff --git a/dnn/src/x86/local/opr_impl.cpp b/dnn/src/x86/local/opr_impl.cpp index 8b323ca0..7ba1fef9 100644 --- a/dnn/src/x86/local/opr_impl.cpp +++ b/dnn/src/x86/local/opr_impl.cpp @@ -41,7 +41,7 @@ LocalImpl::dispatch_float_noncontig_batch( } else if (is_supported(SIMDType::SSE)) { return local_xcorr_SSE; } else { - megdnn_throw(megdnn_mangle("no fma/avx/sse detected")); + megdnn_throw("no fma/avx/sse detected"); } } else { if (is_supported(SIMDType::FMA)) { @@ -51,7 +51,7 @@ LocalImpl::dispatch_float_noncontig_batch( } else if (is_supported(SIMDType::SSE)) { return local_conv_SSE; } else { - megdnn_throw(megdnn_mangle("no fma/avx/sse detected")); + megdnn_throw("no fma/avx/sse detected"); } } } diff --git a/dnn/src/x86/lrn/opr_impl.cpp b/dnn/src/x86/lrn/opr_impl.cpp index 98f37951..7928fe00 100644 --- a/dnn/src/x86/lrn/opr_impl.cpp +++ b/dnn/src/x86/lrn/opr_impl.cpp @@ -114,7 +114,7 @@ void LRNImpl::exec(_megdnn_tensor_in src, } else if (is_supported(SIMDType::SSE)) { f = &lrn_single_instance; } else { - megdnn_throw(megdnn_mangle("no fma/avx/sse detected")); + megdnn_throw("no fma/avx/sse detected"); } auto n = param().n; auto k = param().k; diff --git a/dnn/src/x86/resize/resize_cv.cpp b/dnn/src/x86/resize/resize_cv.cpp index e18770a1..7f6c6c2c 100644 --- a/dnn/src/x86/resize/resize_cv.cpp +++ b/dnn/src/x86/resize/resize_cv.cpp @@ -2413,7 +2413,7 @@ void megdnn::x86::resize_cv_exec(_megdnn_tensor_in src, break; } } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of resize optr.")); + megdnn_throw("Unsupported datatype of resize optr."); } } } diff --git a/dnn/src/x86/separable_filter/opr_impl.cpp b/dnn/src/x86/separable_filter/opr_impl.cpp index 36fb6a71..2050c31d 100644 --- a/dnn/src/x86/separable_filter/opr_impl.cpp +++ b/dnn/src/x86/separable_filter/opr_impl.cpp @@ -126,8 +126,7 @@ void SeparableFilterImpl::exec(_megdnn_tensor_in src, MEGDNN_DISPATCH_CPU_KERN_OPR( separable_filter_exec_8u(src, filter_x, filter_y, dst)); } else { - megdnn_throw( - megdnn_mangle("Unsupported datatype of SeparableFilter opr.")); + megdnn_throw("Unsupported datatype of SeparableFilter opr."); }; } diff --git a/dnn/src/x86/utils.cpp b/dnn/src/x86/utils.cpp index 790c9ffc..356ab154 100644 --- a/dnn/src/x86/utils.cpp +++ b/dnn/src/x86/utils.cpp @@ -201,7 +201,7 @@ bool x86::is_supported(SIMDType type) { default: break; } - megdnn_throw(megdnn_mangle("unknown cpu feature")); + megdnn_throw("unknown cpu feature"); } void x86::disable_simd_type(SIMDType type) { diff --git a/dnn/src/x86/warp_affine/warp_affine_cv.cpp b/dnn/src/x86/warp_affine/warp_affine_cv.cpp index f60d12a4..e37a1f7d 100644 --- a/dnn/src/x86/warp_affine/warp_affine_cv.cpp +++ b/dnn/src/x86/warp_affine/warp_affine_cv.cpp @@ -297,7 +297,7 @@ void megdnn::x86::warp_affine_cv_exec(_megdnn_tensor_in src, DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } diff --git a/dnn/src/x86/warp_perspective/warp_perspective_cv.cpp b/dnn/src/x86/warp_perspective/warp_perspective_cv.cpp index 9e032ad0..bde3ab32 100644 --- a/dnn/src/x86/warp_perspective/warp_perspective_cv.cpp +++ b/dnn/src/x86/warp_perspective/warp_perspective_cv.cpp @@ -234,7 +234,7 @@ void megdnn::x86::warp_perspective_cv_exec( DISPATCH_IMODE(imode, bmode, ch, cb) #undef cb } else { - megdnn_throw(megdnn_mangle("Unsupported datatype of 
WarpAffine optr.")); + megdnn_throw("Unsupported datatype of WarpAffine optr."); } } diff --git a/dnn/test/common/elemwise.cpp b/dnn/test/common/elemwise.cpp index fcc06005..8a272a73 100644 --- a/dnn/test/common/elemwise.cpp +++ b/dnn/test/common/elemwise.cpp @@ -914,7 +914,7 @@ DEF_TEST(all_modes) { run(dtype::Int32{}); } if (trait.allow_float) { - MEGDNN_FLOAT16_SELECT( + DNN_FLOAT16_SELECT( run(dtype::Float16{}, mode == Mode::FAST_TANH_GRAD ? 0.5 : 0.05), ); run(dtype::Float32{}); diff --git a/dnn/test/common/topk.cpp b/dnn/test/common/topk.cpp index e9991830..e785c3a9 100644 --- a/dnn/test/common/topk.cpp +++ b/dnn/test/common/topk.cpp @@ -45,7 +45,7 @@ public: switch (tensor.layout.dtype.enumv()) { CASE(Float32, float); CASE(Int32, int); - MEGDNN_INC_FLOAT16(CASE(Float16, half_float::half)); + DNN_INC_FLOAT16(CASE(Float16, half_float::half)); default: megdnn_throw("bad dtype"); } @@ -193,7 +193,7 @@ namespace test { INST(dtype::Float32); INST(dtype::Int32); -MEGDNN_INC_FLOAT16(INST(dtype::Float16)); +DNN_INC_FLOAT16(INST(dtype::Float16)); #undef INST } } // namespace megdnn diff --git a/dnn/test/naive/rng.cpp b/dnn/test/naive/rng.cpp index a5240eca..6b837378 100644 --- a/dnn/test/naive/rng.cpp +++ b/dnn/test/naive/rng.cpp @@ -63,7 +63,7 @@ TEST_F(NAIVE, UNIFORM_RNG_F32) { } TEST_F(NAIVE, UNIFORM_RNG_F16) { - MEGDNN_INC_FLOAT16(run_uniform(handle())); + DNN_INC_FLOAT16(run_uniform(handle())); } TEST_F(NAIVE, GAUSSIAN_RNG_F32) { @@ -71,7 +71,7 @@ TEST_F(NAIVE, GAUSSIAN_RNG_F32) { } TEST_F(NAIVE, GAUSSIAN_RNG_F16) { - MEGDNN_INC_FLOAT16(run_gaussian(handle())); + DNN_INC_FLOAT16(run_gaussian(handle())); } } // namespace test diff --git a/dnn/test/rocm/argmxx.cpp b/dnn/test/rocm/argmxx.cpp index 9dcf1d6f..b329adc7 100644 --- a/dnn/test/rocm/argmxx.cpp +++ b/dnn/test/rocm/argmxx.cpp @@ -37,8 +37,8 @@ class ArgmxxRNG final: public RNG { } MEGDNN_FOREACH_COMPUTING_DTYPE_FLOAT(cb); #undef cb - megdnn_throw(megdnn_mangle(ssprintf("Unsupported DType: %s", - tensor.layout.dtype.name()))); + megdnn_throw(ssprintf("Unsupported DType: %s", + tensor.layout.dtype.name())); } }; diff --git a/dnn/test/rocm/batched_matrix_mul.cpp b/dnn/test/rocm/batched_matrix_mul.cpp index fe97718a..f222986e 100644 --- a/dnn/test/rocm/batched_matrix_mul.cpp +++ b/dnn/test/rocm/batched_matrix_mul.cpp @@ -22,7 +22,7 @@ TEST_F(ROCM, BATCHED_MATRIX_MUL) { checker.set_epsilon(1e-2); using Param = MatrixMul::Param; size_t b = 9, m = 10, n = 11, k = 12; - std::vector dtypes{MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) + std::vector dtypes{DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Float32()}; for (auto dtype : dtypes) for (unsigned mask = 0; mask < 4; ++mask) { diff --git a/dnn/test/rocm/eye.cpp b/dnn/test/rocm/eye.cpp index 7278e971..20223341 100644 --- a/dnn/test/rocm/eye.cpp +++ b/dnn/test/rocm/eye.cpp @@ -23,7 +23,7 @@ TEST_F(ROCM, EYE) { Checker checker(handle_rocm()); for (DType dtype: std::vector{ - MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Int32(), dtype::Float32()}) + DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Int32(), dtype::Float32()}) for (int k = -20; k < 20; ++k) { checker.set_param({k, dtype.enumv()}); checker.set_dtype(0, dtype); diff --git a/dnn/test/rocm/matrix_mul.cpp b/dnn/test/rocm/matrix_mul.cpp index a40d1686..51bd9055 100644 --- a/dnn/test/rocm/matrix_mul.cpp +++ b/dnn/test/rocm/matrix_mul.cpp @@ -24,7 +24,7 @@ TEST_F(ROCM, MATRIX_MUL) { using Param = MatrixMul::Param; size_t m = 12, n = 16, k = 20; //! 
result error for Int8x8x32, not test correctness - std::vector<DType> dtypes{MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) + std::vector<DType> dtypes{DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Float32()/*, dtype::Int32()*/}; for (auto dtype : dtypes) { for (unsigned mask = 0; mask < 4; ++mask) { @@ -45,10 +45,10 @@ TEST_F(ROCM, MATRIX_MUL) { .set_dtype(0, stype) .set_dtype(1, stype) .set_dtype(2, dtype) - .set_epsilon(MEGDNN_FLOAT16_SELECT( - dtype == dtype::Float16(), false) - ? 5e-2 - : 5e-3) + .set_epsilon( + DNN_FLOAT16_SELECT(dtype == dtype::Float16(), false) + ? 5e-2 + : 5e-3) .execs({A, B, {}}); } } diff --git a/dnn/test/rocm/pooling.cpp b/dnn/test/rocm/pooling.cpp index 972d48ef..ca400e1a 100644 --- a/dnn/test/rocm/pooling.cpp +++ b/dnn/test/rocm/pooling.cpp @@ -25,7 +25,7 @@ namespace test { TEST_F(ROCM, POOLING_FORWARD) { auto args = pooling::get_args(); using Format = param::Pooling::Format; - std::vector<DType> dtypes{MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) + std::vector<DType> dtypes{DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Float32()}; for (auto dtype : dtypes) for (auto format : {Format::NCHW}) diff --git a/dnn/test/rocm/reduce.cpp b/dnn/test/rocm/reduce.cpp index 6337ae80..4893aa15 100644 --- a/dnn/test/rocm/reduce.cpp +++ b/dnn/test/rocm/reduce.cpp @@ -55,7 +55,7 @@ TEST_F(ROCM, REDUCE) { Reduce::DataType data_type) { for (int32_t axis : {0, 1, 2, 3}) { if (data_type == Reduce::DataType::DEFAULT && - MEGDNN_FLOAT16_SELECT(src_dtype == dtype::Float16(), false)) { + DNN_FLOAT16_SELECT(src_dtype == dtype::Float16(), false)) { checker.set_epsilon(1e-2); } else { checker.set_epsilon(1e-3); @@ -72,7 +72,7 @@ TEST_F(ROCM, REDUCE) { for (auto mode : {Mode::SUM, Mode::MEAN, Mode::SUM_SQR, Mode::PRODUCT, Mode::MIN, Mode::MAX}) { for (auto dtype : std::vector<DType>{ - MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) + DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Float32(), dtype::Int32()}) { check(mode, dtype, dtype, Reduce::DataType::DEFAULT); diff --git a/dnn/test/rocm/type_cvt.cpp b/dnn/test/rocm/type_cvt.cpp index 0ca5eeb7..9c58402a 100644 --- a/dnn/test/rocm/type_cvt.cpp +++ b/dnn/test/rocm/type_cvt.cpp @@ -18,7 +18,7 @@ using namespace test; TEST_F(ROCM, TYPE_CVT) { UniformFloatRNG init(0, 20); - std::vector<DType> dtypes = {dtype::Float32(), MEGDNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) + std::vector<DType> dtypes = {dtype::Float32(), DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA) dtype::Int32(), dtype::Int16(), dtype::Int8(), dtype::Uint8()}; for (auto sdtype : dtypes) diff --git a/src/core/impl/common.cpp b/src/core/impl/common.cpp index 9ea94cb6..c79c3910 100644 --- a/src/core/impl/common.cpp +++ b/src/core/impl/common.cpp @@ -28,9 +28,21 @@ using namespace mgb; namespace { - LogLevel min_log_level; +LogLevel config_default_log_level() { + auto default_level = LogLevel::ERROR; + //! env var to config LogLevel + //! DEBUG = 0, INFO = 1, WARN = 2, ERROR = 3, NO_LOG = 4 + //! for example, export MGE_OVERRIDE_LOG_LEVEL=0 sets LogLevel to + //! 
DEBUG + if (auto env = MGB_GETENV("MGE_OVERRIDE_LOG_LEVEL")) + default_level = static_cast<LogLevel>(std::stoi(env)); + + return default_level; } +LogLevel min_log_level = config_default_log_level(); +} // namespace + #if MGB_ENABLE_LOGGING #if MGB_EXTERN_API_TIME @@ -226,11 +238,18 @@ namespace { #endif // MGB_ENABLE_LOGGING LogLevel mgb::set_log_level(LogLevel level) { + if (auto env = MGB_GETENV("MGE_OVERRIDE_LOG_LEVEL")) + level = static_cast<LogLevel>(std::stoi(env)); + auto ret = min_log_level; min_log_level = level; return ret; } +LogLevel mgb::get_log_level() { + return min_log_level; +} + LogHandler mgb::set_log_handler(LogHandler handler) { auto ret = log_handler; log_handler = handler; diff --git a/src/core/impl/exception.cpp b/src/core/impl/exception.cpp index 9ec43b2c..2c3e42c3 100644 --- a/src/core/impl/exception.cpp +++ b/src/core/impl/exception.cpp @@ -20,11 +20,13 @@ namespace { class MegDNNErrorHandler final: public megdnn::ErrorHandler { static MegDNNErrorHandler inst; void do_on_megdnn_error(const std::string &msg) override { - mgb_throw_raw(MegDNNError{msg}); + mgb_throw_raw(MegDNNError{ + LogLevel::NO_LOG == get_log_level() ? "" : msg}); } void do_on_tensor_reshape_error(const std::string &msg) override { - mgb_throw_raw(TensorReshapeError{msg}); + mgb_throw_raw(TensorReshapeError{ + LogLevel::NO_LOG == get_log_level() ? "" : msg}); } public: diff --git a/src/core/include/megbrain/common.h b/src/core/include/megbrain/common.h index cb2781e3..04a59d52 100644 --- a/src/core/include/megbrain/common.h +++ b/src/core/include/megbrain/common.h @@ -134,7 +134,7 @@ void __assert_fail__() __attribute__((noreturn)); #endif // MGB_ASSERT_LOC /* ================ logging ================ */ -//! caused by need remve sensitive words at opt release +//! caused by the need to remove some words at opt release #if MGB_ENABLE_LOGGING #define mgb_log_debug(fmt...) \ _mgb_do_log(::mgb::LogLevel::DEBUG, __FILE__, __func__, __LINE__, fmt) @@ -145,16 +145,18 @@ void __assert_fail__() __attribute__((noreturn)); #define mgb_log_error(fmt...) \ _mgb_do_log(::mgb::LogLevel::ERROR, __FILE__, __func__, __LINE__, fmt) #else +#define LOC "for location info, please build with debug" #define mgb_log_debug(fmt...) \ - _mgb_do_log(::mgb::LogLevel::DEBUG, "", "", 1, fmt) + _mgb_do_log(::mgb::LogLevel::DEBUG, "", "", __LINE__, fmt) #define mgb_log(fmt...) \ - _mgb_do_log(::mgb::LogLevel::INFO, "", "", 1, fmt) + _mgb_do_log(::mgb::LogLevel::INFO, "", "", __LINE__, fmt) #define mgb_log_warn(fmt...) \ - _mgb_do_log(::mgb::LogLevel::WARN, "", "", 1, fmt) + _mgb_do_log(::mgb::LogLevel::WARN, "", "", __LINE__, fmt) #define mgb_log_error(fmt...) \ - _mgb_do_log(::mgb::LogLevel::ERROR, "", "", 1, fmt) + _mgb_do_log(::mgb::LogLevel::ERROR, LOC, "", __LINE__, fmt) +#undef LOC #endif -enum class LogLevel { DEBUG, INFO, WARN, ERROR }; +enum class LogLevel { DEBUG, INFO, WARN, ERROR, NO_LOG }; typedef void(*LogHandler)(LogLevel level, const char *file, const char *func, int line, const char *fmt, @@ -169,6 +171,13 @@ typedef void(*LogHandler)(LogLevel level, LogLevel set_log_level(LogLevel level); /*! + * \brief get logging level + * + * \return current log level + */ +LogLevel get_log_level(); + +/*! 
* \brief set callback for receiving log requests * \return previous log handler */ diff --git a/src/core/include/megbrain/dtype.h b/src/core/include/megbrain/dtype.h index 4df0ba6d..38191c6e 100644 --- a/src/core/include/megbrain/dtype.h +++ b/src/core/include/megbrain/dtype.h @@ -17,8 +17,8 @@ namespace mgb { using ::megdnn::dt_byte; -MEGDNN_INC_FLOAT16(using ::megdnn::dt_float16;) -MEGDNN_INC_FLOAT16(using ::megdnn::dt_bfloat16;) +DNN_INC_FLOAT16(using ::megdnn::dt_float16;) +DNN_INC_FLOAT16(using ::megdnn::dt_bfloat16;) using ::megdnn::dt_float32; using ::megdnn::dt_int8; using ::megdnn::dt_uint8; diff --git a/src/jit/impl/mlir/ir/types.h b/src/jit/impl/mlir/ir/types.h index 51f29fe2..ab121ee6 100644 --- a/src/jit/impl/mlir/ir/types.h +++ b/src/jit/impl/mlir/ir/types.h @@ -22,15 +22,15 @@ namespace mgb { namespace jit { -#define FOR_EACH_DNN_DTYPE(cb) \ - cb(Float32, dt_float32); \ - cb(Uint8, dt_uint8); \ - cb(Int8, dt_int8); \ - cb(Int16, dt_int16); \ - cb(Int32, dt_int32); \ - cb(Byte, dt_byte); \ - MEGDNN_INC_FLOAT16(cb(Float16, dt_float16)); \ - MEGDNN_INC_FLOAT16(cb(BFloat16, dt_bfloat16)); \ +#define FOR_EACH_DNN_DTYPE(cb) \ + cb(Float32, dt_float32); \ + cb(Uint8, dt_uint8); \ + cb(Int8, dt_int8); \ + cb(Int16, dt_int16); \ + cb(Int32, dt_int32); \ + cb(Byte, dt_byte); \ + DNN_INC_FLOAT16(cb(Float16, dt_float16)); \ + DNN_INC_FLOAT16(cb(BFloat16, dt_bfloat16)); \ cb(Bool, dt_bool); mlir::Type megdnn_dtype_to_mlir_type(megdnn::DType type, diff --git a/src/opr/impl/dnn/convolution.cpp b/src/opr/impl/dnn/convolution.cpp index 8a061dcc..64433b9e 100644 --- a/src/opr/impl/dnn/convolution.cpp +++ b/src/opr/impl/dnn/convolution.cpp @@ -877,8 +877,8 @@ void ConvBiasForward::check_winograd_param_valid( "winograd param, got %u", param.channel_block_size); } else { - mgb_assert((MEGDNN_FLOAT16_SELECT(dtype.enumv() == DTypeEnum::Float16, - false) || + mgb_assert((DNN_FLOAT16_SELECT(dtype.enumv() == DTypeEnum::Float16, + false) || dtype.enumv() == DTypeEnum::QuantizedS8 || dtype.enumv() == DTypeEnum::Quantized8Asymm) && (param.channel_block_size == 1 || diff --git a/src/serialization/test/extern_c_opr.cpp b/src/serialization/test/extern_c_opr.cpp index 98ffad2f..dc153ff8 100644 --- a/src/serialization/test/extern_c_opr.cpp +++ b/src/serialization/test/extern_c_opr.cpp @@ -101,7 +101,7 @@ class MGBOprDescImpl { for (size_t x = 0; x < i.shape.shape[0]; ++x) { output_p[x] = input_p[x] + bias; } - } else if (MEGDNN_FLOAT16_SELECT(out_dtype == MGB_DTYPE_FLOAT16, + } else if (DNN_FLOAT16_SELECT(out_dtype == MGB_DTYPE_FLOAT16, false)) { #if !MEGDNN_DISABLE_FLOAT16 auto output_p = static_cast(output[0].data);
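
Two notes for reviewers, each with a small self-contained sketch.

First, the MGE_OVERRIDE_LOG_LEVEL handling added to src/core/impl/common.cpp boils down to the pattern below. This is a minimal sketch rather than the MegEngine source: getenv_log_level() and main() are hypothetical names, std::getenv stands in for MGB_GETENV, and the range check is an addition of the sketch (the patch feeds std::stoi(env) straight into the static_cast, so a non-numeric value would still throw and an out-of-range one would yield a bogus level).

// Minimal sketch of the MGE_OVERRIDE_LOG_LEVEL override, assuming the
// numbering introduced by this patch:
// DEBUG = 0, INFO = 1, WARN = 2, ERROR = 3, NO_LOG = 4
#include <cstdio>
#include <cstdlib>
#include <string>

enum class LogLevel { DEBUG, INFO, WARN, ERROR, NO_LOG };

// Parse the env var; fall back when it is unset or out of range.
static LogLevel getenv_log_level(LogLevel fallback) {
    if (const char* env = std::getenv("MGE_OVERRIDE_LOG_LEVEL")) {
        int v = std::stoi(env);  // throws on non-numeric input, as in the patch
        if (v >= 0 && v <= static_cast<int>(LogLevel::NO_LOG))
            return static_cast<LogLevel>(v);
    }
    return fallback;
}

// Startup default comes from the env var, ERROR otherwise -- mirroring
// config_default_log_level() in the patch.
static LogLevel min_log_level = getenv_log_level(LogLevel::ERROR);

// Mirrors mgb::set_log_level(): when the env var is set it overrides
// whatever the caller asked for, which is what makes an opt build
// debuggable at runtime.
LogLevel set_log_level(LogLevel level) {
    level = getenv_log_level(level);
    LogLevel ret = min_log_level;
    min_log_level = level;
    return ret;  // previous level, same contract as the real API
}

int main() {
    // run with e.g. `export MGE_OVERRIDE_LOG_LEVEL=0` to force DEBUG
    std::printf("startup log level = %d\n", static_cast<int>(min_log_level));
    set_log_level(LogLevel::WARN);
    std::printf("after set_log_level(WARN) = %d\n",
                static_cast<int>(min_log_level));
}

Running this under export MGE_OVERRIDE_LOG_LEVEL=0 prints 0 (DEBUG) both times, even though the compiled-in default is ERROR and the code later asks for WARN: the environment wins over programmatic settings.

Second, the bulk of the diff is the mechanical rename MEGDNN_INC_FLOAT16 / MEGDNN_FLOAT16_SELECT -> DNN_INC_FLOAT16 / DNN_FLOAT16_SELECT. The canonical definitions live in dnn/src/common/utils.cuh, which this patch also touches, so treat the shapes below as an assumption for illustration only; the sketch also shows why call sites pass MEGDNN_COMMA instead of a bare comma.

// Assumed shape of the float16 helper macros renamed throughout this patch.
#include <cstdio>

#define MEGDNN_DISABLE_FLOAT16 0  // stand-in for the real build-system flag

#if !MEGDNN_DISABLE_FLOAT16
#define DNN_INC_FLOAT16(_x) _x         // keep float16-only code
#define DNN_FLOAT16_SELECT(_x, _y) _x  // float16 build: take the first branch
#else
#define DNN_INC_FLOAT16(_x)            // compile float16-only code out
#define DNN_FLOAT16_SELECT(_x, _y) _y  // float16-less build: take the fallback
#endif

// MEGDNN_COMMA expands to a bare comma so that comma-containing code can
// travel through the single-argument DNN_INC_FLOAT16(), which is why the
// test hunks above read `DNN_INC_FLOAT16(dtype::Float16() MEGDNN_COMMA)`.
#define MEGDNN_COMMA ,

int main() {
    // {16, 32} when float16 is enabled, just {32} when it is compiled out
    int dtypes[] = {DNN_INC_FLOAT16(16 MEGDNN_COMMA) 32};
    std::printf("first=%d selected=%d\n", dtypes[0],
                DNN_FLOAT16_SELECT(16, 32));
}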