Browse Source

chore(dnn/test): refactor megdnn arm_common test

GitOrigin-RevId: 4168910301
release-1.2
Megvii Engine Team 4 years ago
parent
commit
0560a218af
6 changed files with 2655 additions and 2392 deletions
  1. +190
    -2391
      dnn/test/arm_common/conv_bias_multi_thread.cpp
  2. +410
    -0
      dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
  3. +575
    -0
      dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp
  4. +1240
    -0
      dnn/test/arm_common/conv_bias_multi_thread_weight_preprocess.cpp
  5. +193
    -0
      dnn/test/common/conv_bias.cpp
  6. +47
    -1
      dnn/test/common/conv_bias.h

+ 190
- 2391
dnn/test/arm_common/conv_bias_multi_thread.cpp
File diff suppressed because it is too large
View File


+ 410
- 0
dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp View File

@@ -0,0 +1,410 @@
/**
* \file dnn/test/arm_common/conv_bias_multi_thread_conv1x1.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/
#include "megdnn/dtype.h"
#include "test/arm_common/fixture.h"
#include "test/common/benchmarker.h"
#include "test/common/conv_bias.h"

#include "test/arm_common/cpuinfo_help.h"

using namespace megdnn;
using namespace test;
using namespace conv_bias;

#ifdef __ARM_FEATURE_DOTPROD
// 1x1 conv_bias on NCHW44 dot-product layout: runs the selected per-arch
// dot-product matmul algo over quantized-symmetric int8 (with and without
// bias) and plain int8x8x32 dtype combinations.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_CONV1x1_QUANTIZEDSYM_MK4_DOT) {
UniformIntRNG rng{-50, 50};

// NOTE: no comments inside the macro body -- a '//' would swallow the
// trailing '\' line continuation.
#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, QUAN_NLMODE, ONLY_BR_BIASMODE, 1, \
true, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
ONLY_NO_BIASMODE, 1, true, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
ONLY_NO_BIASMODE, 1, true, false, true), \
handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(), \
dtype::Int32(), {}, name);

float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("CONV1x1:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD");
#elif MEGDNN_ARMV7
cb("CONV1x1:AARCH32_INT8_MK4_8X4X4_DOTPROD");
#endif
#undef cb
}
#endif

// clang-format on
/***************************** Conv1x1 Algo Test ***********************/
// General fp32 1x1 stride-1 conv_bias cases through the per-arch matmul
// algo, plus the GEMV specialisation on 1x1 spatial inputs.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> all_args =
            get_conv_bias_1x1_args(false, false);
#if MEGDNN_AARCH64
    check_conv_bias(all_args, handle(), "CONV1x1:AARCH64_F32K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(all_args, handle(), "CONV1x1:ARMV7_F32:48");
#endif
    // The GEMV path only supports 1x1 spatial inputs; keep those cases only.
    std::vector<conv_bias::TestArg> spatial_1x1;
    for (auto&& arg : all_args) {
        if (arg.src.shape[2] != 1 || arg.src.shape[3] != 1)
            continue;
        spatial_1x1.push_back(arg);
    }
    check_conv_bias(spatial_1x1, handle(), "CONV1x1_GEMV");
}

// NCHW44 fp32 1x1 stride-1 cases through the packed MK4 matmul algo,
// plus the GEMV specialisation on 1x1 spatial inputs.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> all_args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
#if MEGDNN_AARCH64
    check_conv_bias(all_args, handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(all_args, handle(), "CONV1x1:ARMV7_F32_MK4_PACK_4X12:24");
#endif
    // The GEMV path only supports 1x1 spatial inputs; keep those cases only.
    std::vector<conv_bias::TestArg> spatial_1x1;
    for (auto&& arg : all_args) {
        if (arg.src.shape[2] != 1 || arg.src.shape[3] != 1)
            continue;
        spatial_1x1.push_back(arg);
    }
    check_conv_bias(spatial_1x1, handle(), "CONV1x1_GEMV");
}

// NCHW44 fp32 1x1 stride-1 cases through the no-pack MK4 matmul algo.
// Only cases whose flattened spatial size (H*W) is a multiple of 4 are kept.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_NO_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> all_args =
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true);
    std::vector<conv_bias::TestArg> divisible_by_4;
    for (auto&& arg : all_args) {
        const auto spatial = arg.src.shape[2] * arg.src.shape[3];
        if (spatial % 4 == 0)
            divisible_by_4.push_back(arg);
    }
#if MEGDNN_AARCH64
    check_conv_bias(divisible_by_4, handle(), "CONV1x1:AARCH64_F32_MK4_4x16:24");
#elif MEGDNN_ARMV7
    check_conv_bias(divisible_by_4, handle(), "CONV1x1:ARMV7_F32_MK4_4x8:48");
#endif
}

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// fp16 1x1 stride-1 conv_bias cases; 0.03 is the tolerance used by the
// other fp16 conv_bias tests in this suite.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F16) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> fp16_args =
            get_conv_bias_1x1_args(false, false);
    NormalRNG rng(1);
#if MEGDNN_AARCH64
    checker_conv_bias_common(fp16_args, handle(), &rng, 0.03, dtype::Float16{},
                             dtype::Float16{}, dtype::Float16{},
                             dtype::Float16{},
                             "CONV1x1:AARCH64_F16_K8X24X1:48");
#elif MEGDNN_ARMV7
    checker_conv_bias_common(fp16_args, handle(), &rng, 0.03, dtype::Float16{},
                             dtype::Float16{}, dtype::Float16{},
                             dtype::Float16{},
                             "CONV1x1:AARCH32_F16_K4X16X1:24");
#endif
    // The GEMV path only supports 1x1 spatial inputs; keep those cases only.
    std::vector<conv_bias::TestArg> spatial_1x1;
    for (auto&& arg : fp16_args) {
        if (arg.src.shape[2] != 1 || arg.src.shape[3] != 1)
            continue;
        spatial_1x1.push_back(arg);
    }
    check_conv_bias(spatial_1x1, handle(), "CONV1x1_GEMV");
}
#endif

// Quantized-symmetric int8 1x1 stride-1 conv_bias; each per-arch matmul
// algo plus the GEMV specialisation. Note epsilon is loosened to 1 on armv7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDSYM) {
UniformIntRNG rng{-50, 50};
float epsilon = 0.001;
std::vector<conv_bias::TestArg> args =
get_conv_bias_1x1_args(false, false, true, true);
#define cb(name) \
checker_conv_bias_common( \
args, handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:24");
#else
cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:48");
#endif
#elif MEGDNN_ARMV7
epsilon = 1;
cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:48");
#endif
#undef cb
// The GEMV path only supports 1x1 spatial inputs; keep those cases only.
std::vector<conv_bias::TestArg> gemv_args;
for (auto&& arg : args)
if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
gemv_args.emplace_back(arg);
}
checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
dtype::QuantizedS32(6.25f),
dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}

#if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quantized-asymmetric uint8 1x1 stride-1 conv_bias; each per-arch matmul
// algo plus the GEMV specialisation. Note epsilon is loosened to 1 on armv7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDASYM) {
UniformIntRNG rng{-50, 50};
std::vector<conv_bias::TestArg> args =
get_conv_bias_1x1_args(false, false, true, true);
#define cb(name) \
checker_conv_bias_common(args, handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), \
dtype::Quantized8Asymm(50.3f, (uint8_t)120), \
name);
float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:48");
#else
cb("CONV1x1:AARCH64_QUINT8_K8X8X8:24");
#endif
#elif MEGDNN_ARMV7
epsilon = 1;
cb("CONV1x1:ARMV7_QUINT8_K4X8X8:48");
#endif
#undef cb
// The GEMV path only supports 1x1 spatial inputs; keep those cases only.
std::vector<conv_bias::TestArg> gemv_args;
for (auto&& arg : args)
if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
gemv_args.emplace_back(arg);
}
checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
dtype::Quantized8Asymm(1.2f, (uint8_t)125),
dtype::Quantized8Asymm(1.3f, (uint8_t)129),
dtype::QuantizedS32(1.2 * 1.3),
dtype::Quantized8Asymm(50.3f, (uint8_t)120),
"CONV1x1_GEMV");
}
#endif

#if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quantized-asymmetric uint8 input with int32 output (empty dst dtype slot),
// no fused bias-to-dst requantization; per-arch algos plus GEMV.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUINT8x8x32) {
NormalRNG rng(128.f);
float epsilon = 0.001;
std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
#define cb(name) \
checker_conv_bias_common(args, handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), {}, name);

#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:24");
#else
cb("CONV1x1:AARCH64_QUINT8_K8X8X8:48");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH32_QUINT8_K4X8X4:48");
#endif
cb("CONV1x1:ARMV7_QUINT8_K4X8X8:24");
#endif
#undef cb

// The GEMV path only supports 1x1 spatial inputs; keep those cases only.
std::vector<conv_bias::TestArg> gemv_args;
for (auto&& arg : args)
if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
gemv_args.emplace_back(arg);
}
checker_conv_bias_common(gemv_args, handle(), &rng, epsilon,
dtype::Quantized8Asymm(1.2f, (uint8_t)125),
dtype::Quantized8Asymm(1.3f, (uint8_t)129),
dtype::QuantizedS32(1.2 * 1.3), {},
"CONV1x1_GEMV");
}

// int8 input / int16 output 1x1 stride-1 conv_bias: plain NCHW cases via
// cb(), NCHW44 cases via cb_nchw44(), then the GEMV specialisation.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_1X1_S1_INT8x8x16) {
UniformIntRNG rng{-50, 50};
float epsilon = 0.001;
std::vector<conv_bias::TestArg> args =
get_conv_bias_1x1_args(false, true, false, false);
std::vector<conv_bias::TestArg> args_nchw44 = get_nchw44_conv_bias_args(
{1},ONLY_IDENTITY_NLMODE,BR_AND_BIAS_BIASMODE, 1, true);
#define cb(name) \
checker_conv_bias_common(args, handle(), &rng, epsilon, dtype::Int8{}, \
dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, \
name);

#define cb_nchw44(name) \
checker_conv_bias_common(args_nchw44, handle(), &rng, epsilon, \
dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, \
dtype::Int16{}, name);

#if MEGDNN_AARCH64
cb("CONV1x1:AARCH64_INT8X8X16_K8X8X8:24");
cb("CONV1x1:AARCH64_INT8X8X16_K4X4X16:24");
cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_4X4X8:48");
cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_16X12X4:48");
#elif MEGDNN_ARMV7
cb("CONV1x1:ARMV7_INT8X8X16_K4X8X8:24");
cb("CONV1x1:ARMV7_INT8X8X16_K4X2X16:48");
cb_nchw44("CONV1x1:ARMV7_INT8X8X16_MK4_K8X8X4:48");
#endif
// The generic arm_common fallback algo is exercised on both arches.
cb("CONV1x1:ARM_COMMON_INT8X8X16:48");

#undef cb
#undef cb_nchw44

// The GEMV path only supports 1x1 spatial inputs; keep those cases only.
std::vector<conv_bias::TestArg> gemv_args;
for (auto&& arg : args)
if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
gemv_args.emplace_back(arg);
}

checker_conv_bias_common(gemv_args, handle(), &rng, epsilon, dtype::Int8{},
dtype::Int8{}, dtype::Int16{}, dtype::Int16{},
"CONV1x1_GEMV");
}
#endif

// int8 input / int32 output 1x1 stride-1 conv_bias; the helper internally
// also re-runs each case with quantized-symmetric dtypes.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32) {
using namespace conv_bias;
std::vector<conv_bias::TestArg> args =
get_conv_bias_1x1_args(false, true, false, false);

#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);

#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:48");
#else
cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:24");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
cb("CONV1x1:AARCH32_INT8_K6X8X4:48");
#endif
cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:24");
#endif

#if MEGDNN_ARMV7
cb("CONV1x1:ARMV7_INT8X8X32_K4X2X16:48");
#endif
#undef cb

// The GEMV path only supports 1x1 spatial inputs; keep those cases only.
std::vector<conv_bias::TestArg> gemv_args;
for (auto&& arg : args)
if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
gemv_args.emplace_back(arg);
}
checker_conv_bias_mul_int8x8x32(gemv_args, handle(), "CONV1x1_GEMV");
}

#ifndef __ARM_FEATURE_DOTPROD
// NCHW44 (MK4) int8x8x32 1x1 conv_bias on the non-dotprod kernels; first the
// plain int dtypes, then quantized-symmetric dtypes via a second macro.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_MK4) {
using namespace conv_bias;
std::vector<conv_bias::TestArg> args =
get_nchw44_conv_bias_args({1},ONLY_IDENTITY_NLMODE,ONLY_NO_BIASMODE, 1,true);

#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);

#if MEGDNN_AARCH64
cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb

// Second pass: quantized-symmetric dtypes; epsilon loosened to 1 on armv7.
UniformIntRNG rng{-50, 50};
float epsilon = 0.001;
#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1, \
true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
epsilon = 1;
cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
}
#endif

// Quantized-symmetric NCHW44 1x1 cases through the GEMV algo; only 1x1
// spatial inputs are supported by that path.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> all_args =
            get_nchw44_conv_bias_args({1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1,
                                      true);
    std::vector<conv_bias::TestArg> spatial_1x1;
    for (auto&& arg : all_args) {
        if (arg.src.shape[2] != 1 || arg.src.shape[3] != 1)
            continue;
        spatial_1x1.push_back(arg);
    }
    UniformIntRNG rng{-50, 50};
    const float epsilon = 0.001;
    checker_conv_bias_common(spatial_1x1, handle(), &rng, epsilon,
                             dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                             dtype::QuantizedS32(6.25f),
                             dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}

#ifdef __ARM_FEATURE_DOTPROD
// Same as the NCHW44 GEMV test above but on the dot-product NCHW44 layout
// (last get_nchw44_conv_bias_args argument set to true).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44_DOT) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> all_args = get_nchw44_conv_bias_args(
            {1}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1, true, false, true);
    std::vector<conv_bias::TestArg> spatial_1x1;
    for (auto&& arg : all_args) {
        if (arg.src.shape[2] != 1 || arg.src.shape[3] != 1)
            continue;
        spatial_1x1.push_back(arg);
    }
    UniformIntRNG rng{-50, 50};
    const float epsilon = 0.001;
    checker_conv_bias_common(spatial_1x1, handle(), &rng, epsilon,
                             dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                             dtype::QuantizedS32(6.25f),
                             dtype::QuantizedS8(60.25f), "CONV1x1_GEMV");
}
#endif

#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A55 for the duration of the test so the
// A55-specific code path of the MK4 packed kernel is selected.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    using namespace conv_bias;
    check_conv_bias(
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true),
            handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
}
#endif
#endif

#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A53 for the duration of the test so the
// A53-specific code path of the MK4 packed kernel is selected.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    using namespace conv_bias;
    check_conv_bias(
            get_nchw44_conv_bias_args({1}, FULL_NLMODE, ALL_BIASMODE, 1, true),
            handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
}
#endif
#endif

// vim: syntax=cpp.doxygen

+ 575
- 0
dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp View File

@@ -0,0 +1,575 @@
/**
* \file dnn/test/arm_common/conv_bias_multi_thread_im2col.cpp
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied.
*/

#include "megdnn/dtype.h"
#include "test/arm_common/fixture.h"
#include "test/common/benchmarker.h"
#include "test/common/conv_bias.h"

#include "test/arm_common/cpuinfo_help.h"

using namespace megdnn;
using namespace test;
using namespace conv_bias;


// im2col fp32 conv_bias at stride 2, kernel sizes 1-7, against each
// per-arch fp32 matmul algo.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE2) {
#define cb(name) \
check_conv_bias( \
get_conv_bias_args({1, 2, 3, 4, 5, 6, 7}, 2, false, false, false), \
handle(), name);
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_F32K8X12X1")
cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
cb("IM2COLMATMUL:FB_F32_K8X12X1")
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:ARMV7_F32")
#endif
#undef cb

}



// im2col fp32 conv_bias at stride 1, kernel sizes 2-7, against each
// per-arch fp32 matmul algo (the fallback FB kernel runs on both arches).
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE1) {
#define cb(name) \
check_conv_bias( \
get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false), \
handle(), name);
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_F32K8X12X1")
cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
cb("IM2COLMATMUL:FB_F32_K8X12X1")
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:ARMV7_F32")
cb("IM2COLMATMUL:FB_F32_K8X12X1")
#endif
#undef cb
}

//! CPUINFO related tests
#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A55 so the A55-tuned fp32 kernel path is selected,
// then run im2col at both strides.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_A55) {
CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
#define cb(name,stride) \
check_conv_bias( \
get_conv_bias_args({2, 3, 4, 5, 6, 7}, stride, false, false, false), \
handle(), name);

cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 1)
cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 2)
#undef cb
}
#endif
#endif

#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A53 so the A53-tuned fp32 kernel path is selected,
// then run im2col at both strides.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_A53) {
CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
#define cb(name,stride) \
check_conv_bias( \
get_conv_bias_args({2, 3, 4, 5, 6, 7}, stride, false, false, false), \
handle(), name);

cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 1)
cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 2)
#undef cb
}
#endif
#endif

#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A55 and exercise the MK4 packed fp32 im2col kernel
// at both strides.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_MK4_PACK_F32_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    using namespace conv_bias;
    for (int stride : {1, 2}) {
        std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
                {2, 3, 7}, FULL_NLMODE, ONLY_NO_BIASMODE, stride);
        check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
    }
}
#endif
#endif

#if MEGDNN_AARCH64
#if MGB_ENABLE_CPUINFO
// Pin cpuinfo to Cortex-A53 and exercise the MK4 packed fp32 im2col kernel
// at both strides.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_MK4_PACK_F32_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    using namespace conv_bias;
    for (int stride : {1, 2}) {
        std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
                {2, 3, 7}, FULL_NLMODE, ONLY_NO_BIASMODE, stride);
        check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
    }
}
#endif
#endif

// Quantized-symmetric im2col: stride-1 kernels 2-7 plus a 1x1 stride-2
// case, against each per-arch int8 matmul algo. Epsilon loosened on armv7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false, \
true, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name); \
checker_conv_bias_common( \
get_conv_bias_args({1}, 2, false, false, false, true, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);

float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
epsilon = 1;
cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif
#undef cb
}

#if __ARM_FEATURE_DOTPROD

// Quantized-symmetric im2col on the NCHW44 dot-product layout: stride-1
// kernels 2-7 and a 1x1 stride-2 case against the dot-product MK4 algo.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_MK4_DOT) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, QUAN_NLMODE, \
BR_AND_NO_BIASMODE, 1, false, false, \
true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
ONLY_BR_BIASMODE, 2, false, false, \
true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);

float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}

// Fused-nonlinearity variant of the MK4 dot-product im2col test, restricted
// to the 3x3 stride-2 kernel.
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_MK4_DOT_S2_FUSE) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 2, \
false, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);

float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}

// Quantized int8 input with raw int32 output (empty dst dtype slot) on the
// NCHW44 dot-product layout, stride 1 and a 1x1 stride-2 case.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_S8x8x32_MK4_DOT) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args( \
{2, 3, 4, 5, 6, 7}, ONLY_IDENTITY_NLMODE, \
BR_AND_BIAS_BIASMODE, 1, false, false, true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
BR_AND_BIAS_BIASMODE, 2, false, false, \
true), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);
float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}

// Plain (non-quantized) int8 in / int32 out on the NCHW44 dot-product
// layout, stride 1 and a 1x1 stride-2 case.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32_MK4_DOT) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common(get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, \
ONLY_IDENTITY_NLMODE, \
BR_AND_NO_BIASMODE, 1, \
false, false, true), \
handle(), &rng, epsilon, dtype::Int8(), \
dtype::Int8(), dtype::Int32(), {}, name); \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE, \
BR_AND_BIAS_BIASMODE, 2, false, false, \
true), \
handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(), \
dtype::Int32(), {}, name);

float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
#endif

// clang-format on
#if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quantized-asymmetric uint8 im2col: stride-1 kernels 2-7 plus a 1x1
// stride-2 case, against the per-arch quint8 matmul. Epsilon loosened on
// armv7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDASYM) {
NormalRNG rng(128.f);
#define cb(name) \
checker_conv_bias_common(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, \
false, false, true, true), \
handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), \
dtype::Quantized8Asymm(50.3f, (uint8_t)120), \
name); \
checker_conv_bias_common( \
get_conv_bias_args({1}, 2, false, false, false, true, true), \
handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), \
dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);
float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
epsilon = 1;
cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}
#endif

#if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quantized-asymmetric uint8 input with int32 output (empty dst dtype
// slot): stride-1 kernels 2-7 plus a 1x1 stride-2 case.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUINT8x8x32) {
UniformIntRNG rng{-50, 50};
float epsilon = 0.001;
#define cb(name) \
checker_conv_bias_common(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, \
false, true, true, false), \
handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), {}, name); \
checker_conv_bias_common( \
get_conv_bias_args({1}, 2, false, false, true, true, false), \
handle(), &rng, epsilon, \
dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
dtype::QuantizedS32(1.2 * 1.3), {}, name);

#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH32_QUINT8_K4X8X4");
#endif
cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}

// int8 input / int16 output im2col: plain NCHW cases via cb(), NCHW44
// cases (stride 1 and a 1x1 stride-2 variant) via cb_nchw44().
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COLMATMUL_INT8x8x16) {
UniformIntRNG rng{-50, 50};
float epsilon = 0.001;
std::vector<conv_bias::TestArg> args_nchw44 =
get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, ONLY_IDENTITY_NLMODE,
BR_AND_BIAS_BIASMODE, 1, true);
std::vector<conv_bias::TestArg> args_nchw44_1x1s2 =
get_nchw44_conv_bias_args({1}, ONLY_IDENTITY_NLMODE,
BR_AND_BIAS_BIASMODE, 2, true);
#define cb(name) \
checker_conv_bias_common( \
get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, true), \
handle(), &rng, epsilon, dtype::Int8{}, dtype::Int8{}, \
dtype::Int16{}, dtype::Int16{}, name); \
checker_conv_bias_common(get_conv_bias_args({1}, 2, false, false, true), \
handle(), &rng, epsilon, dtype::Int8{}, \
dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, \
name);

#define cb_nchw44(name) \
checker_conv_bias_common(args_nchw44, handle(), &rng, epsilon, \
dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, \
dtype::Int16{}, name); \
checker_conv_bias_common(args_nchw44_1x1s2, handle(), &rng, epsilon, \
dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, \
dtype::Int16{}, name);

#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X16_K8X8X8");
cb("IM2COLMATMUL:AARCH64_INT8X8X16_K4X4X16");
cb_nchw44("IM2COLMATMUL:AARCH64_INT8X8X16_MK4_4X4X8");
cb_nchw44("IM2COLMATMUL:AARCH64_INT8X8X16_MK4_16X12X4");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X8X8");
cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X2X16");
cb_nchw44("IM2COLMATMUL:ARMV7_INT8X8X16_MK4_K8X8X4");
#endif
// The generic arm_common fallback algo is exercised on both arches.
cb("IM2COLMATMUL:ARM_COMMON_INT8X8X16");

#undef cb
#undef cb_nchw44
}

#endif

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// fp16 im2col: stride-1 kernels 2-7 merged with 1x1 stride-2 cases,
// against the per-arch fp16 matmul algo (0.03 epsilon as in the other
// fp16 tests).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_FP16) {
using namespace conv_bias;

param::ConvBias cur_param;

std::vector<conv_bias::TestArg> args =
get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false);
std::vector<conv_bias::TestArg> args1 =
get_conv_bias_args({1}, 2, false, false, false);
args.insert(args.begin(), args1.begin(), args1.end());

NormalRNG rng(1);
#define cb(name) \
checker_conv_bias_common(args, handle(), &rng, 0.03, dtype::Float16{}, \
dtype::Float16{}, dtype::Float16{}, \
dtype::Float16{}, name);

#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_F16_K8X24X1");
#elif MEGDNN_ARMV7
cb("IM2COLMATMUL:AARCH32_F16_K4X16X1");
#endif
#undef cb
}
#endif

#if MEGDNN_AARCH64 || MEGDNN_ARMV7
#if !__ARM_FEATURE_DOTPROD
// Stride-2 NCHW44 int8x8x32 im2col against the per-arch non-dot MK4 matmul.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S2) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {2, 5, 7}, ONLY_IDENTITY_NLMODE, BR_AND_NO_BIASMODE, 2, false);
#if MEGDNN_AARCH64
    checker_conv_bias_mul_int8x8x32(
            args, handle(), "IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    checker_conv_bias_mul_int8x8x32(
            args, handle(), "IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
}

// Stride-1 NCHW44 int8x8x32 im2col against the per-arch non-dot MK4 matmul.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S1) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {3, 4, 6}, ONLY_IDENTITY_NLMODE, BR_AND_NO_BIASMODE, 1);
#if MEGDNN_AARCH64
    checker_conv_bias_mul_int8x8x32(
            args, handle(), "IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    checker_conv_bias_mul_int8x8x32(
            args, handle(), "IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
}

// Quantized-symmetric stride-2 NCHW44 im2col; epsilon is loosened to 1 on
// the non-aarch64 branch.
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S2) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({3, 4, 6}, QUAN_NLMODE, \
BR_AND_NO_BIASMODE, 2), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);
float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
epsilon = 1;
cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}

// Quantized-symmetric stride-1 NCHW44 im2col; epsilon is loosened to 1 on
// the non-aarch64 branch.
TEST_F(ARM_COMMON_MULTI_THREADS,
CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S1) {
UniformIntRNG rng{-50, 50};

#define cb(name) \
checker_conv_bias_common( \
get_nchw44_conv_bias_args({2, 5, 7}, QUAN_NLMODE, \
BR_AND_NO_BIASMODE, 1), \
handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
dtype::QuantizedS8(60.25f), name);
float epsilon = 0.001;
#if MEGDNN_AARCH64
cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
epsilon = 1;
cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}

#if MEGDNN_AARCH64
// Fused nonlinearity/bias cases for the 3x3 stride-1 NCHW44 non-dot kernel
// (aarch64-only block, so the algo name is used directly).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_FUSE) {
    UniformIntRNG rng{-50, 50};
    const float epsilon = 0.001;
    checker_conv_bias_common(
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1),
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
            dtype::QuantizedS8(60.25f),
            "IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
}

#endif
#endif
#endif

#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
// Fused nonlinearity/bias cases for the 3x3 stride-1 NCHW44 dot-product
// kernel (aarch64 + dotprod only, so the algo name is used directly).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44DOT_FUSE) {
    UniformIntRNG rng{-50, 50};
    const float epsilon = 0.001;
    checker_conv_bias_common(
            get_nchw44_conv_bias_args({3}, QUAN_NLMODE, BR_AND_NO_BIASMODE, 1,
                                      false, false, true),
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
            dtype::QuantizedS8(60.25f),
            "IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
}
#endif
#endif

// int8 input / int32 output im2col: stride-1 kernels 2-7 merged with 1x1
// stride-2 cases; the helper also re-runs each case quantized.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32) {
using namespace conv_bias;
std::vector<conv_bias::TestArg> args =
get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true, true);
std::vector<conv_bias::TestArg> args1 =
get_conv_bias_args({1}, 2, false, true, true);
args.insert(args.begin(), args1.begin(), args1.end());

#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);

#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
cb("IM2COLMATMUL:AARCH32_INT8_K6X8X4");
#endif
cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif

#if MEGDNN_ARMV7
cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X2X16");
#endif
#undef cb
}

// Stride-1 NCHW44 fp32 im2col with the packed MK4 matmul kernels.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S1_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> test_cases = get_nchw44_conv_bias_args(
            {2, 4, 7}, FULL_NLMODE, BR_AND_BIAS_BIASMODE, 1);
#if MEGDNN_AARCH64
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
}

// Stride-2 NCHW44 fp32 im2col with the packed MK4 matmul kernels.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S2_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> test_cases = get_nchw44_conv_bias_args(
            {3, 5, 6}, FULL_NLMODE, BR_AND_BIAS_BIASMODE, 2);
#if MEGDNN_AARCH64
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
}

// Fused-mode stride-2 NCHW44 fp32 im2col: all bias modes, 3x3 kernel only.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S2_MK4_PACK_F32_FUSE) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> test_cases =
            get_nchw44_conv_bias_args({3}, FULL_NLMODE, ALL_BIASMODE, 2);
#if MEGDNN_AARCH64
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    check_conv_bias(test_cases, handle(),
                    "IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
}

// vim: syntax=cpp.doxygen

+ 1240
- 0
dnn/test/arm_common/conv_bias_multi_thread_weight_preprocess.cpp
File diff suppressed because it is too large
View File


+ 193
- 0
dnn/test/common/conv_bias.cpp View File

@@ -1196,6 +1196,199 @@ void winograd_algo_extra_impl(const TensorNDArray& tensors, uint32_t m,
free(wb.ptr());
};

/**
 * \brief Run every case in \p args through ConvBias restricted to
 * \p algo_name and compare against the reference implementation.
 *
 * \param args      test cases (param plus src/filter/bias shapes)
 * \param handle    megdnn handle the checker executes on
 * \param rng       optional RNG applied to tensor slots 0-3; pass nullptr
 *                  to keep the checker's default RNG
 * \param epsilon   maximum tolerated error
 * \param type0     src dtype (tensor slot 0)
 * \param type1     filter dtype (tensor slot 1)
 * \param type2     bias dtype (tensor slot 2)
 * \param type3     dst dtype (tensor slot 4)
 * \param algo_name algorithm the dispatcher is required to select
 */
void checker_conv_bias_common(std::vector<conv_bias::TestArg> args, Handle* handle,
                              RNG* rng, float epsilon, DType type0, DType type1,
                              DType type2, DType type3, const char* algo_name) {
    using namespace conv_bias;

    Checker<ConvBias> checker(handle);
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
    checker.set_dtype(0, type0)
            .set_dtype(1, type1)
            .set_dtype(2, type2)
            .set_dtype(4, type3)
            .set_epsilon(epsilon);
    // Use nullptr comparison (not NULL) per modern C++ convention.
    if (rng != nullptr) {
        checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng).set_rng(3, rng);
    }
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs(
                {arg.src, arg.filter, arg.bias, {}, {}});
    }
}

/**
 * \brief Run \p args through \p algo_name twice: once with plain
 * Int8/Int8->Int32 dtypes, then again with quantized-symmetric dtypes.
 *
 * A looser epsilon (1.0) is used on armv7, matching the original settings.
 */
void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
                                     Handle* handle, const char* algo_name) {
    using namespace conv_bias;
    float epsilon = 0.001;
#if MEGDNN_ARMV7
    epsilon = 1.0;
#endif
    Checker<ConvBias> checker(handle);
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));

    // Pass 1: plain int dtypes, no bias / z tensor.
    checker.set_dtype(0, dtype::Int8())
            .set_dtype(1, dtype::Int8())
            .set_dtype(2, dtype::Int32())
            .set_dtype(4, dtype::Int32())
            .set_epsilon(epsilon);
    for (auto&& test_case : args) {
        checker.set_param(test_case.param)
                .execs({test_case.src, test_case.filter, {}, {}, {}});
    }

    // Pass 2: the same cases with quantized-symmetric dtypes. The checker
    // state is set once up front; re-setting it per iteration (as the old
    // code did) is equivalent.
    UniformIntRNG quantized_rng{-50, 50};
    checker.set_dtype(0, dtype::QuantizedS8(2.5f))
            .set_dtype(1, dtype::QuantizedS8(2.5f))
            .set_dtype(2, dtype::QuantizedS32(6.25f))
            .set_dtype(4, dtype::QuantizedS32(6.25f))
            .set_rng(0, &quantized_rng)
            .set_rng(1, &quantized_rng)
            .set_rng(2, &quantized_rng)
            .set_epsilon(epsilon);
    for (auto&& test_case : args) {
        checker.set_param(test_case.param)
                .execs({test_case.src, test_case.filter, {}, {}, {}});
    }
}

/**
 * \brief same coverage as checker_conv_bias_mul_int8x8x32, but driven through
 * the weight-preprocess proxy so the preprocessed-filter path is exercised.
 */
void checker_conv_bias_int8x8x32_preprocess(
        std::vector<conv_bias::TestArg> args, Handle* handle,
        const char* algo_name) {
    using namespace conv_bias;

    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle);
    checker.set_before_exec_callback(
            conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));

    // pass 1: plain int8 x int8 -> int32 (default epsilon)
    checker.set_dtype(0, dtype::Int8())
            .set_dtype(1, dtype::Int8())
            .set_dtype(2, dtype::Int32())
            .set_dtype(4, dtype::Int32());
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
    }

    // pass 2: quantized int8 -> qint32 with a bounded uniform RNG
    UniformIntRNG rng{-50, 50};
    checker.set_dtype(0, dtype::QuantizedS8(2.5f))
            .set_dtype(1, dtype::QuantizedS8(2.5f))
            .set_dtype(2, dtype::QuantizedS32(6.25f))
            .set_dtype(4, dtype::QuantizedS32(6.25f))
            .set_rng(0, &rng)
            .set_rng(1, &rng)
            .set_rng(2, &rng);
    for (auto&& arg : args) {
        checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
    }
}

/**
 * \brief enumerate NCHW44 / NCHW44_DOT conv_bias test cases.
 *
 * Crosses the given kernel sizes, nonlinearities and bias modes with a fixed
 * grid of batch/channel/spatial sizes and group counts, skipping combinations
 * the packed formats cannot express.
 *
 * \param kernel_vec   square kernel sizes to test
 * \param nlmode_vec   nonlinearity modes to test
 * \param biasmode_vec bias modes to test
 * \param stride       conv stride (same for h and w)
 * \param no_pad       currently unused (see MEGDNN_MARK_USED_VAR below)
 * \param is_input_nchw if true, src stays NCHW and only the filter is packed
 * \param is_nchw44_dot if true, emit NCHW44_DOT format instead of NCHW44
 */
std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
        std::vector<size_t> kernel_vec,
        std::vector<param::ConvBias::NonlineMode> nlmode_vec,
        std::vector<megdnn::BiasMode> biasmode_vec, size_t stride, bool no_pad,
        bool is_input_nchw, bool is_nchw44_dot) {
    using namespace conv_bias;
    using NLMode = param::ConvBias::NonlineMode;

    std::vector<TestArg> args;
    // no_pad is accepted for interface compatibility but not consulted here
    MEGDNN_MARK_USED_VAR(no_pad);

    // validates one candidate combination and, if legal, appends it to args
    auto pack = [&](size_t n, size_t oc, size_t ic, size_t h, size_t w,
                    size_t kernel, size_t stride, size_t group, NLMode nlmode,
                    megdnn::BiasMode bias_mode, int any_pad = -1) {
        // NCHW44 packs channels in groups of 4
        constexpr int pack_c = 4;
        // default padding is SAME-style (kernel / 2) unless caller overrides
        const size_t pad = any_pad >= 0 ? any_pad : kernel / 2;
        auto oc_per_group = oc / group;
        auto ic_per_group = ic / group;
        // output channels per group must be divisible by the pack size
        bool ok_group = (oc % group == 0 && ic % group == 0) &&
                        oc_per_group % pack_c == 0 && oc_per_group > 0 &&
                        ic_per_group > 0;
        // NCHW-input (hybrid) variant: only dense convs with small ic
        bool nchw_disable = group > 1 || ic_per_group >= 4;
        // pure NCHW44 variant: input channels per group must pack evenly
        bool nchw44_disable = ic_per_group % pack_c != 0;
        bool invalid_pad = (w + 2 * pad < kernel) || (h + 2 * pad < kernel);
        if (!(ok_group) || invalid_pad) {
            return;
        }
        if ((is_input_nchw && nchw_disable) ||
            (!is_input_nchw && nchw44_disable)) {
            return;
        }

        size_t kernel_h = kernel;
        size_t kernel_w = kernel;
        param::ConvBias param;
        if (!is_nchw44_dot) {
            param.format = param::ConvBias::Format::NCHW44;
        } else {
            param.format = param::ConvBias::Format::NCHW44_DOT;
        }
        param.stride_h = stride;
        param.stride_w = stride;
        param.pad_h = pad;
        param.pad_w = pad;
        param.nonlineMode = nlmode;

        // packed layouts: src n/(ic/4)/h/w/4, dense filter oc4/ic4/kh/kw/4/4
        auto src_tensor_shape = TensorShape{n, ic / pack_c, h, w, pack_c};
        auto weight_tensor_shape = TensorShape{
                oc / pack_c, ic / pack_c, kernel_h, kernel_w, pack_c, pack_c};
        auto bias_tensor_shape = TensorShape{};
        if (bias_mode == megdnn::BiasMode::BROADCAST_CHANNEL_BIAS) {
            bias_tensor_shape = {1, oc / pack_c, 1, 1, pack_c};
        } else if (bias_mode == megdnn::BiasMode::BIAS) {
            // full bias matches the conv output shape
            bias_tensor_shape = {n, oc / pack_c,
                                 (h + 2 * pad - kernel) / stride + 1,
                                 (w + 2 * pad - kernel) / stride + 1, pack_c};
        }
        if (group == 1) {
            param.sparse = param::ConvBias::Sparse::DENSE;
        } else if (group > 1 && ic / group == 1 && oc / group == 1) {
            // channel-wise is deliberately rejected here; the two statements
            // below are unreachable and kept only as documentation of the
            // shape a channel-wise case would use
            megdnn_assert(0, "not support channel wise");
            param.sparse = param::ConvBias::Sparse::GROUP;
            weight_tensor_shape = TensorShape{group / pack_c, 1, 1,
                                              kernel_h, kernel_w, pack_c};
        } else if (group > 1 && oc_per_group % pack_c == 0 && oc / group > 0 &&
                   ic_per_group % pack_c == 0 && ic / group > 0) {
            param.sparse = param::ConvBias::Sparse::GROUP;
            weight_tensor_shape = TensorShape{group,
                                              oc_per_group / pack_c,
                                              ic_per_group / pack_c,
                                              kernel_h,
                                              kernel_w,
                                              pack_c,
                                              pack_c};
        }
        if (is_input_nchw) {
            // hybrid mode: plain NCHW src, filter packed on oc only
            src_tensor_shape = TensorShape{n, ic, h, w};
            weight_tensor_shape =
                    TensorShape{oc / pack_c, kernel_h, kernel_w, ic, pack_c};
        }
        args.emplace_back(param, src_tensor_shape, weight_tensor_shape,
                          bias_tensor_shape);
    };

    // cartesian sweep over the fixed size grid; pack() filters illegal combos
    for (auto bias : biasmode_vec)
        for (auto nlmode : nlmode_vec)
            for (size_t n : {1, 2})
                for (size_t kernel : kernel_vec)
                    for (size_t oc : {4, 12})
                        for (size_t ic : {1, 3, 4, 12})
                            for (size_t h : {1, 3, 12})
                                for (size_t w : {1, 16, 23}) {
                                    for (size_t group = 1;
                                         group <=
                                         std::min(std::min(oc, ic), 4_z);
                                         ++group) {
                                        // kernel > 1 cannot run on 1-pixel dims
                                        if (kernel != 1 && (h == 1 || w == 1)) {
                                            continue;
                                        }
                                        pack(n, oc, ic, h, w, kernel, stride,
                                             group, nlmode, bias);
                                    }
                                }
    return args;
}

} // namespace conv_bias
} // namespace test
} // namespace megdnn


+ 47
- 1
dnn/test/common/conv_bias.h View File

@@ -97,7 +97,53 @@ void checker_conv_bias_int8x8x16(
void winograd_algo_extra_impl(const TensorNDArray& tensors, uint32_t m,
param::ConvBias param, Handle* handle,
param::MatrixMul::Format format);

//! run args through ConvBias with explicit dtypes/rng/epsilon; algo_name is
//! enforced via a before-exec callback (defined in conv_bias.cpp)
void checker_conv_bias_common(std::vector<conv_bias::TestArg> args,
                              Handle* handle, RNG* rng, float epsilon,
                              DType type0, DType type1, DType type2,
                              DType type3, const char* algo_name);
//! enumerate NCHW44 / NCHW44_DOT conv_bias test cases for the given kernel
//! sizes, nonlinearities, bias modes and stride
std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
        std::vector<size_t> kernel_vec,
        std::vector<param::ConvBias::NonlineMode> nlmode_vec,
        std::vector<megdnn::BiasMode> biasmode_vec, size_t stride,
        bool no_pad = false, bool is_input_nchw = false,
        bool is_nchw44_dot = false);
//! check int8x8x32 cases, both plain Int8/Int32 and quantized dtypes
void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
                                     Handle* handle, const char* algo_name);
//! same as checker_conv_bias_mul_int8x8x32 but through the weight-preprocess
//! proxy
void checker_conv_bias_int8x8x32_preprocess(
        std::vector<conv_bias::TestArg> args, Handle* handle,
        const char* algo_name);

// Convenience initializer-list macros for the nlmode_vec / biasmode_vec
// parameters of get_nchw44_conv_bias_args above.

//! every supported nonlinearity
#define FULL_NLMODE                                                          \
    {                                                                        \
        param::ConvBias::NonlineMode::IDENTITY,                              \
                param::ConvBias::NonlineMode::RELU,                          \
                param::ConvBias::NonlineMode::H_SWISH,                       \
                param::ConvBias::NonlineMode::SIGMOID                        \
    }
//! nonlinearities available on quantized paths (no SIGMOID)
#define QUAN_NLMODE                                                          \
    {                                                                        \
        param::ConvBias::NonlineMode::IDENTITY,                              \
                param::ConvBias::NonlineMode::RELU,                          \
                param::ConvBias::NonlineMode::H_SWISH                        \
    }
#define ONLY_IDENTITY_NLMODE \
    { param::ConvBias::NonlineMode::IDENTITY }

//! all three bias modes
#define ALL_BIASMODE                                                         \
    {                                                                        \
        megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS, \
                megdnn::BiasMode::BIAS                                       \
    }
#define BR_AND_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
#define BR_AND_BIAS_BIASMODE \
    { megdnn::BiasMode::NO_BIAS, megdnn::BiasMode::BIAS }
#define ONLY_BR_BIASMODE \
    { megdnn::BiasMode::BROADCAST_CHANNEL_BIAS }
#define ONLY_NO_BIASMODE \
    { megdnn::BiasMode::NO_BIAS }
#define ONLY_BIAS_BIASMODE \
    { megdnn::BiasMode::BIAS }
} // namespace conv_bias
} // namespace test
} // namespace megdnn


Loading…
Cancel
Save