- #include "megdnn/oprs/nn.h"
-
- #include "src/common/utils.h"
- #include "src/cuda/cudnn_with_check.h"
- #include "test/common/checker.h"
- #include "test/common/conv_bias.h"
- #include "test/common/tensor.h"
- #include "test/common/workspace_wrapper.h"
- #include "test/cuda/benchmark.h"
- #include "test/cuda/conv_test_utils.h"
- #include "test/cuda/fixture.h"
- #include "test/cuda/utils.h"
-
- namespace megdnn {
- namespace test {
- namespace conv {
-
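- // Conventions used throughout these tests (inferred from the checker calls
- // below): tensor indices 0..4 denote {src, filter, bias, z (residual input),
- // dst}, an empty shape {} asks the checker to deduce the layout, and the
- // QuantizedS32 bias scale is the product of the src and filter scales, as the
- // int8 quantization scheme requires.
-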
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_CUDNN_CONVOLUTION) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "DEFAULT:CUDNN:ConvBiasActivation:", param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_1x1) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_3x3) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_5x5) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_7x7) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args(7));
- }
-
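- // The *_WITH_Z tests exercise residual addition: the z tensor (index 3) must
- // match the deduced output shape; with pad 1 and a 3x3 filter, the stride-2
- // variant below maps 12x12 inputs to 6x6 outputs.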
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_WITH_Z) {
- require_compute_capability(6, 1);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{1.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NCHW4;
- checker.set_param(param).execs(
- {{32, 4, 12, 12, 4},
- {16, 4, 3, 3, 4},
- {1, 4, 1, 1, 4},
- {32, 4, 12, 12, 4},
- {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_STRIDE2_WITH_Z) {
- require_compute_capability(6, 1);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{1.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 2;
- param.format = param::ConvBias::Format::NCHW4;
- checker.set_param(param).execs(
- {{32, 4, 12, 12, 4},
- {16, 4, 3, 3, 4},
- {1, 4, 1, 1, 4},
- {32, 4, 6, 6, 4},
- {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_CHECK_BOUNDS_1x1) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_CHECK_BOUNDS_3x3) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_CHECK_BOUNDS_5x5) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_CHECK_BOUNDS_7x7) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(7));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4);
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_WITH_Z) {
- require_compute_capability(6, 1);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{1.1f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::CHWN4;
- checker.set_param(param).execs(
- {{4, 12, 12, 32, 4},
- {4, 3, 3, 16, 4},
- {4, 1, 1, 1, 4},
- {4, 12, 12, 32, 4},
- {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_HSWISH) {
- require_compute_capability(6, 1);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(4, dtype::QuantizedS8{0.001f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::CHWN4;
- param.nonlineMode = param::ConvBias::NonlineMode::H_SWISH;
- checker.set_param(param).execs(
- {{4, 12, 12, 32, 4}, {4, 3, 3, 16, 4}, {4, 1, 1, 1, 4}, {}, {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_CHECK_BOUNDS) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_1x1) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_3x3) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_5x5) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_7x7) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args(7));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_SMALL_CHANNEL_CHECK_BOUNDS) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_small_channel_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_1x1_CHECK_BOUNDS) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args_check_bounds(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_5x5_CHECK_BOUNDS) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args_check_bounds(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL_7x7_CHECK_BOUNDS) {
- require_compute_capability(6, 1);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_small_channel_args_check_bounds(7));
- }
-
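- // The mma{M}x{N}x{K} suffix in the algorithm names below encodes the IMMA
- // (tensor core) instruction tile shape; these kernels require compute
- // capability 7.5, hence the stricter capability check.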
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_1x1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_tensorcore_args(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_3x3) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_tensorcore_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_5x5) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_tensorcore_args(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_7x7) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_tensorcore_args(7));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_CHECK_BOUNDS_ALGO_0) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_CHECK_BOUNDS_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma8x32x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_CHECK_BOUNDS_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma32x8x16", param::ConvBias::Format::NCHW4,
- conv_bias::get_int8_nchw4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_ALGO_0) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_tensorcore_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma32x8x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_tensorcore_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma8x32x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_tensorcore_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_CHECK_BOUNDS_1x1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_CHECK_BOUNDS_5x5) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_CHECK_BOUNDS_7x7) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16", param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(7));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_NCHW4_TENSORCORE_WITH_Z) {
- require_compute_capability(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{1.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NCHW4;
- checker.set_param(param).execs(
- {{64, 8, 12, 12, 4},
- {64, 8, 3, 3, 4},
- {1, 16, 1, 1, 4},
- {64, 16, 12, 12, 4},
- {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_TENSORCORE_WITH_Z) {
- require_compute_capability(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16"));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{1.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::CHWN4;
- checker.set_param(param).execs(
- {{8, 12, 12, 64, 4},
- {8, 3, 3, 64, 4},
- {16, 1, 1, 1, 4},
- {16, 12, 12, 64, 4},
- {}});
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_CHECK_BOUNDS_ALGO_0) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma16x16x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_CHECK_BOUNDS_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma8x32x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_CHECK_BOUNDS_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma32x8x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_check_bounds(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_ALGO_0) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma16x16x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma8x32x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_REFORMAT_FILTER_TENSORCORE_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_REORDER_FILTER_mma32x8x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_ALGO_0) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma16x16x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma8x32x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.3f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma32x8x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(3));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_1x1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma16x16x16",
- param::ConvBias::Format::CHWN4, conv_bias::get_int8_chwn4_args(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_5x5) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma16x16x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_7x7) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma16x16x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(7));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_5x5_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma32x8x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_5x5_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma8x32x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(5));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_1x1_ALGO_1) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma32x8x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(1));
- }
-
- TEST_F(CUDA, CONV_BIAS_INT8_CHWN4_UNROLL_WIDTH_TENSORCORE_1x1_ALGO_2) {
- require_compute_capability(7, 5);
- conv_bias::check_conv_bias(
- dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.1f}, handle_cuda(),
- "INT8_CHWN4_IMMA_IMPLICIT_GEMM_UNROLL_WIDTH_mma8x32x16",
- param::ConvBias::Format::CHWN4,
- conv_bias::get_int8_chwn4_args_small_batch(1));
- }
-
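- // The fallback algorithm is selected as an {algo, {sub_algo}} pair: the
- // NCHW-format input is presumably relayouted to NCHW4 internally and then
- // dispatched to the named dot-product implicit-GEMM sub-algorithm.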
- TEST_F(CUDA, FALLBACK_CONV_QS8) {
- require_compute_capability_eq(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- auto check = [&checker](const std::string& algo, const std::string& sub_algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- {algo.c_str(), {sub_algo.c_str()}}));
- UniformIntRNG rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{19.990229f})
- .set_dtype(4, dtype::QuantizedS8{19.990228f})
- .set_epsilon(1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 2;
- param.format = param::ConvBias::Format::NCHW;
- checker.set_param(param).execs(
- {{16, 15, 14, 14}, {28, 15, 3, 3}, {1, 28, 1, 1}, {16, 28, 7, 7}, {}});
- checker.set_param(param).execs(
- {{16, 32, 14, 14}, {32, 32, 3, 3}, {1, 32, 1, 1}, {}, {}});
- };
- check("FALLBACK_CONV_NCHW_QS8", "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM");
- }
-
- TEST_F(CUDA, FALLBACK_CONV_QS8_F32) {
- require_compute_capability_eq(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- auto check = [&checker](const std::string& algo, const std::string& sub_algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- {algo.c_str(), {sub_algo.c_str()}}));
- UniformIntRNG rng{-3, 3};
- UniformFloatRNG bias_rng{-50.f, 50.f};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::Float32{})
- .set_dtype(3, dtype::Float32{})
- .set_dtype(4, dtype::Float32{})
- .set_epsilon(1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 2;
- param.format = param::ConvBias::Format::NCHW;
- checker.set_param(param).execs(
- {{16, 15, 14, 14}, {28, 15, 3, 3}, {1, 28, 1, 1}, {16, 28, 7, 7}, {}});
- checker.set_param(param).execs(
- {{16, 32, 14, 14}, {32, 32, 3, 3}, {1, 32, 1, 1}, {}, {}});
- };
- check("FALLBACK_CONV_NCHW_QS8", "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM");
- }
-
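- // OprWeightPreprocessProxy exercises the weight pre-processing path, checking
- // that kernels relying on a reordered filter layout still produce correct
- // results when the filter is transformed ahead of time.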
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_WEIGHT_PREPROCESS) {
- require_compute_capability(6, 1);
- Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
- handle_cuda());
- auto check = [&checker](const std::string& algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
- UniformIntRNG rng{-16, 16};
- UniformIntRNG bias_rng{-50, 50};
- UniformIntRNG const_rng{1, 1};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{1.3f})
- .set_dtype(4, dtype::QuantizedS8{1.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 2;
- param.format = param::ConvBias::Format::NCHW4;
- checker.set_param(param).execs(
- {{16, 4, 14, 14, 4}, {16, 4, 3, 3, 4}, {1, 4, 1, 1, 4}, {}, {}});
- };
- check("INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_128X32X32_64X32X32");
- check("INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_16X64X8_16X64X8");
- }
-
- #if CUDA_VERSION >= 10020
- /// \note: we only check several cases and block sizes in megdnn_test; the
- /// full test cases are covered in the cutlass repository
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NCHW32_IMMA) {
- require_compute_capability_eq(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- auto check = [&checker](const std::string& algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
- UniformIntRNG rng{-8, 8};
- UniformIntRNG bias_rng{-50, 50};
- UniformIntRNG const_rng{1, 1};
- // use scales that are all integers to avoid rounding error
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{6.0f})
- .set_dtype(1, dtype::QuantizedS8{1.0f})
- .set_dtype(2, dtype::QuantizedS32{6.0f})
- .set_dtype(3, dtype::QuantizedS8{1.0f})
- .set_dtype(4, dtype::QuantizedS8{6.0f})
- .set_epsilon(1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NCHW32;
- checker.set_param(param).execs(
- {{16, 8, 7, 7, 32}, {256, 8, 3, 3, 32}, {1, 8, 1, 1, 32}, {}, {}});
- param.nonlineMode = param::ConvBias::NonlineMode::RELU;
- checker.set_param(param).execs(
- {{16, 8, 7, 7, 32}, {256, 8, 1, 1, 32}, {1, 8, 1, 1, 32}, {}, {}});
- param.nonlineMode = param::ConvBias::NonlineMode::H_SWISH;
- checker.set_param(param).execs(
- {{16, 8, 7, 7, 32}, {256, 8, 3, 3, 32}, {1, 8, 1, 1, 32}, {}, {}});
- // use non-integer scales
- param.nonlineMode = param::ConvBias::NonlineMode::H_SWISH;
- checker.set_dtype(0, dtype::QuantizedS8{1.1f})
- .set_dtype(1, dtype::QuantizedS8{1.2f})
- .set_dtype(2, dtype::QuantizedS32{1.1f * 1.2f})
- .set_dtype(3, dtype::QuantizedS8{1.1f})
- .set_dtype(4, dtype::QuantizedS8{6.0f})
- .set_epsilon(1 + 1e-3)
- .set_max_avg_error(1e-1)
- .set_max_avg_biased_error(1e-1)
- .execs({{16, 8, 7, 7, 32},
- {256, 8, 3, 3, 32},
- {1, 8, 1, 1, 32},
- {16, 8, 7, 7, 32},
- {}});
- };
- std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NCHW32_IMMA_IMPLICIT_GEMM_128X128X64_64X64X64_2",
- ConvBias::DirectParam{});
- check(algo);
- algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NCHW32_IMMA_IMPLICIT_GEMM_128X32X32_64X32X32_1",
- ConvBias::DirectParam{});
- check(algo);
- }
-
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NHWC) {
- require_compute_capability(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- auto check = [&checker](const std::string& algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
- UniformIntRNG rng{-8, 8};
- UniformIntRNG bias_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng)
- .set_dtype(0, dtype::QuantizedS8{1.2f})
- .set_dtype(1, dtype::QuantizedS8{1.3f})
- .set_dtype(2, dtype::QuantizedS32{1.2f * 1.3f})
- .set_dtype(3, dtype::QuantizedS8{19.990229f})
- .set_dtype(4, dtype::QuantizedS8{19.990228f})
- .set_epsilon(1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NHWC;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {32, 3, 3, 16}, {1, 1, 1, 32}, {}, {}});
- param.pad_h = param.pad_w = 0;
- param.nonlineMode = param::ConvBias::NonlineMode::RELU;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {16, 1, 1, 16}, {1, 1, 1, 16}, {}, {}});
- };
- std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_64X16X32_64X16X32_2_16",
- ConvBias::DirectParam{});
- check(algo);
- algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_128X32X32_64X32X32_1_16",
- ConvBias::DirectParam{});
- check(algo);
- }
-
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NHWC_UINT4_WEIGHT_PREPROCESS) {
- require_compute_capability(7, 5);
- Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
- handle_cuda());
- auto check = [&checker](const std::string& algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
- UniformIntRNG rng{-8, 8};
- UniformIntRNG bias_rng{-50, 50};
- UniformIntRNG rng_u4{0, 15};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &rng_u4)
- .set_dtype(0, dtype::QuantizedS8{0.2f})
- .set_dtype(1, dtype::QuantizedS8{0.3f})
- .set_dtype(2, dtype::QuantizedS32{0.2f * 0.3f})
- .set_dtype(3, dtype::Quantized4Asymm{0.5f, 8})
- .set_dtype(4, dtype::Quantized4Asymm{0.5f, 4})
- .set_epsilon(1 + 1e-3);
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NHWC;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {32, 3, 3, 16}, {1, 1, 1, 32}, {}, {}});
- param.pad_h = param.pad_w = 0;
- param.nonlineMode = param::ConvBias::NonlineMode::RELU;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {16, 1, 1, 16}, {1, 1, 1, 16}, {}, {}});
- };
- std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_64X16X32_64X16X32_2_16",
- ConvBias::DirectParam{});
- check(algo);
- algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_128X32X32_64X32X32_1_16",
- ConvBias::DirectParam{});
- check(algo);
- }
-
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NHWC_FLOAT) {
- require_compute_capability(7, 5);
- Checker<ConvBiasForward> checker(handle_cuda());
- auto check = [&checker](const std::string& algo) {
- checker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(algo.c_str()));
- UniformIntRNG rng{-8, 8};
- UniformFloatRNG float_rng{-50, 50};
- checker.set_rng(0, &rng)
- .set_rng(1, &rng)
- .set_rng(2, &float_rng)
- .set_rng(3, &float_rng)
- .set_dtype(0, dtype::QuantizedS8(1.9980618f))
- .set_dtype(1, dtype::QuantizedS8(1.9980927f))
- .set_dtype(2, dtype::Float32())
- .set_dtype(3, dtype::Float32())
- .set_dtype(4, dtype::Float32());
- param::ConvBias param;
- param.pad_h = param.pad_w = 1;
- param.stride_h = param.stride_w = 1;
- param.format = param::ConvBias::Format::NHWC;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {32, 3, 3, 16}, {1, 1, 1, 32}, {}, {}});
- param.pad_h = param.pad_w = 0;
- param.nonlineMode = param::ConvBias::NonlineMode::RELU;
- checker.set_param(param).execs(
- {{16, 7, 7, 16}, {16, 1, 1, 16}, {1, 1, 1, 16}, {}, {}});
- };
- std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_64X16X32_64X16X32_2_16",
- ConvBias::DirectParam{});
- check(algo);
- algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NHWC_IMMA_IMPLICIT_GEMM_128X32X32_64X32X32_1_16",
- ConvBias::DirectParam{});
- check(algo);
- }
-
- #endif
-
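- // Mixed-format test: NCHW4_NCHW takes int8 NCHW4 src/filter and produces a
- // float NCHW dst, so the run helper deduces the dst layout explicitly with
- // deduce_layout and passes it to execs.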
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NCHW4_NCHW) {
- require_compute_capability(6, 1);
- using namespace conv_bias;
- Checker<ConvBiasForward> checker(handle_cuda());
- UniformIntRNG int_rng{-3, 3};
- UniformFloatRNG float_rng{-50, 50};
- ConvBias::Param param;
- param.format = ConvBias::Param::Format::NCHW4_NCHW;
- param.nonlineMode = ConvBias::Param::NonlineMode::IDENTITY;
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM"));
- checker.set_dtype(0, dtype::QuantizedS8(1.9980618f))
- .set_dtype(1, dtype::QuantizedS8(1.9980927f))
- .set_dtype(2, dtype::Float32())
- .set_dtype(3, dtype::Float32())
- .set_dtype(4, dtype::Float32())
- .set_rng(0, &int_rng)
- .set_rng(1, &int_rng)
- .set_rng(2, &float_rng)
- .set_rng(3, &float_rng)
- .set_param(param);
-
- auto opr = handle_cuda()->create_operator<ConvBias>();
-
- auto run = [&](const TensorShapeArray& shapes) {
- opr->param() = param;
- TensorLayout dst_layout;
- opr->deduce_layout(
- {shapes[0], dtype::Float32()}, {shapes[1], dtype::Float32()}, {}, {},
- dst_layout);
- checker.execs({shapes[0], shapes[1], shapes[2], dst_layout, {}});
- };
-
- run({{16, 4, 23, 40, 4}, {20, 4, 3, 3, 4}, {1, 20, 1, 1}});
- run({{16, 4, 92, 160, 4}, {24, 4, 3, 3, 4}, {1, 24, 1, 1}});
- run({{16, 4, 92, 160, 4}, {20, 4, 3, 3, 4}, {1, 20, 1, 1}});
- run({{16, 4, 92, 160, 4}, {16, 4, 3, 3, 4}, {1, 16, 1, 1}});
- run({{16, 4, 92, 160, 4}, {8, 4, 3, 3, 4}, {1, 8, 1, 1}});
- run({{16, 4, 46, 80, 4}, {4, 4, 3, 3, 4}, {1, 4, 1, 1}});
- }
-
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NCHW4_NCHW32) {
- require_compute_capability(6, 1);
- using namespace conv_bias;
- Checker<ConvBiasForward> checker(handle_cuda());
- UniformIntRNG int_rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- ConvBias::Param param;
- param.format = ConvBias::Param::Format::NCHW4_NCHW32;
- param.nonlineMode = ConvBias::Param::NonlineMode::IDENTITY;
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM"));
- checker.set_dtype(0, dtype::QuantizedS8(1.9980618f))
- .set_dtype(1, dtype::QuantizedS8(1.9980927f))
- .set_dtype(2, dtype::QuantizedS32(1.9980618f * 1.9980927f))
- .set_dtype(3, dtype::QuantizedS8(1.9980618f))
- .set_dtype(4, dtype::QuantizedS8(1.9980618f))
- .set_rng(0, &int_rng)
- .set_rng(1, &int_rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &int_rng)
- .set_param(param);
- auto run = [&](const TensorShapeArray& shapes) {
- checker.execs({shapes[0], shapes[1], shapes[2], {}, {}});
- };
-
- run({{16, 4, 23, 40, 4}, {32, 4, 3, 3, 4}, {1, 1, 1, 1, 32}});
- run({{16, 4, 92, 160, 4}, {32, 4, 3, 3, 4}, {1, 1, 1, 1, 32}});
- run({{16, 4, 46, 80, 4}, {32, 4, 3, 3, 4}, {1, 1, 1, 1, 32}});
- }
-
- #if CUDA_VERSION >= 10020
- TEST_F(CUDA, CUTLASS_CONV_BIAS_INT8_NCHW32_NCHW4) {
- require_compute_capability(7, 5);
- using namespace conv_bias;
- Checker<ConvBiasForward> checker(handle_cuda());
- UniformIntRNG int_rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- ConvBias::Param param;
- param.format = ConvBias::Param::Format::NCHW32_NCHW4;
- param.nonlineMode = ConvBias::Param::NonlineMode::IDENTITY;
- checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- ConvBias::algo_name<ConvBias::DirectParam>(
- "INT8_NCHW32_IMMA_IMPLICIT_GEMM_32X128X32_32X64X32_1",
- ConvBias::DirectParam{})
- .c_str()));
- checker.set_dtype(0, dtype::QuantizedS8(1.9980618f))
- .set_dtype(1, dtype::QuantizedS8(1.9980927f))
- .set_dtype(2, dtype::QuantizedS32(1.9980618f * 1.9980927f))
- .set_dtype(3, dtype::QuantizedS8(1.9980618f))
- .set_dtype(4, dtype::QuantizedS8(1.9980618f))
- .set_rng(0, &int_rng)
- .set_rng(1, &int_rng)
- .set_rng(2, &bias_rng)
- .set_rng(3, &int_rng)
- .set_param(param);
- auto run = [&](const TensorShapeArray& shapes) {
- checker.execs({shapes[0], shapes[1], shapes[2], {}, {}});
- };
-
- run({{16, 2, 23, 40, 32}, {20, 2, 3, 3, 32}, {1, 5, 1, 1, 4}});
- run({{16, 1, 92, 160, 32}, {24, 1, 3, 3, 32}, {1, 6, 1, 1, 4}});
- run({{16, 2, 46, 80, 32}, {4, 2, 3, 3, 32}, {1, 1, 1, 1, 4}});
- }
- #endif
-
- #if MEGDNN_WITH_BENCHMARK
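- // The benchmarks below are compiled only when MEGDNN_WITH_BENCHMARK is set.
- // Passing nullptr instead of an algorithm name (the *_ALL_ALGO variants)
- // appears to let the benchmark choose among all available algorithms rather
- // than pinning a single kernel.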
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4) {
- require_compute_capability(6, 1);
- benchmark_target_algo(
- handle_cuda(), get_resnet50_bench_args(), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM",
- param::ConvBias::Format::CHWN4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_NCHW4) {
- require_compute_capability(6, 1);
- benchmark_target_algo(
- handle_cuda(), get_resnet50_bench_args(), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM",
- param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4_TENSORCORE) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_resnet50_bench_args(256), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_CHWN4_IMMA_IMPLICIT_GEMM_mma16x16x16",
- param::ConvBias::Format::CHWN4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4_TENSORCORE_ALL_ALGO) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_resnet50_bench_args(256), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, nullptr, param::ConvBias::Format::CHWN4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4_DET_ALL_ALGO) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_detection_bench_args(), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, nullptr, param::ConvBias::Format::CHWN4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_NCHW4_TENSORCORE) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_resnet50_bench_args(256), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_NCHW4_IMMA_IMPLICIT_GEMM_mma16x16x16",
- param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_CHWN4_SMALL_CHANNEL) {
- require_compute_capability(6, 1);
- std::vector<BenchArgs> args;
- args.push_back(BenchArgs{64, 4, 224, 224, 64, 7, 2});
- benchmark_target_algo(
- handle_cuda(), args, dtype::QuantizedS8{1.2f}, dtype::QuantizedS8{1.3f},
- dtype::QuantizedS32{1.2f * 1.3f}, dtype::QuantizedS8{1.0f},
- "INT8_CHWN4_DOTPROD_IMPLICIT_GEMM", param::ConvBias::Format::CHWN4);
- }
-
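- // A hand-rolled benchmark for the NCHW4_NCHW mixed format: execs returns the
- // total time over RUNS iterations, so dividing by RUNS gives the average
- // per-run time in milliseconds.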
- TEST_F(CUDA, BENCHMARK_CONV_BIAS_INT8_NCHW4_NCHW) {
- CUBenchmarker<ConvBiasForward> benchmarker(handle_cuda());
- size_t RUNS = 1000;
- benchmarker.set_display(false).set_times(RUNS);
-
- using namespace conv_bias;
- UniformIntRNG int_rng{-3, 3};
- UniformIntRNG bias_rng{-50, 50};
- ConvBias::Param param;
- param.format = ConvBias::Param::Format::NCHW4_NCHW;
- param.nonlineMode = ConvBias::Param::NonlineMode::IDENTITY;
-
- benchmarker.set_before_exec_callback(
- conv_bias::ConvBiasAlgoChecker<ConvBiasForward>(
- "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM"));
-
- benchmarker.set_dtype(0, dtype::QuantizedS8(1.9980618f))
- .set_dtype(1, dtype::QuantizedS8(1.9980927f))
- .set_dtype(2, dtype::Float32())
- .set_dtype(3, dtype::Float32())
- .set_dtype(4, dtype::Float32())
- .set_rng(0, &int_rng)
- .set_rng(1, &int_rng)
- .set_param(param);
-
- auto run = [&](const TensorShapeArray& shapes) {
- auto time_in_ms =
- benchmarker.execs({shapes[0], shapes[1], shapes[2], {}, {}}) / RUNS;
-
- printf("src=%s, filter=%s, dst=%s, time=%.2f\n", shapes[0].to_string().c_str(),
- shapes[1].to_string().c_str(), shapes[2].to_string().c_str(),
- time_in_ms);
- };
-
- run({{16, 16, 224, 224, 4}, {32, 16, 3, 3, 4}, {1, 32, 1, 1}});
- run({{16, 16, 92, 160, 4}, {32, 16, 3, 3, 4}, {1, 32, 1, 1}});
- run({{16, 16, 46, 80, 4}, {32, 16, 3, 3, 4}, {1, 32, 1, 1}});
- }
-
- #if CUDA_VERSION >= 10020
- TEST_F(CUDA, BENCHMARK_CUTLASS_CONV_BIAS_INT8_NCHW32) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_resnet50_bench_args(256), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "DIRECT:INT8_NCHW32_IMMA_IMPLICIT_GEMM",
- param::ConvBias::Format::NCHW32);
- }
-
- TEST_F(CUDA, BENCHMARK_CUTLASS_CONV_BIAS_INT8_NHWC) {
- require_compute_capability(7, 5);
- benchmark_target_algo_with_cudnn_tsc(
- handle_cuda(), get_det_first_bench_args(16), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "DIRECT:INT8_NHWC_IMMA_IMPLICIT_GEMM",
- param::ConvBias::Format::NHWC);
- }
- #endif
-
- TEST_F(CUDA, BENCHMARK_CUTLASS_CONV_BIAS_INT8_NCHW4) {
- require_compute_capability(6, 1);
- benchmark_target_algo(
- handle_cuda(), get_resnet50_bench_args(64), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM",
- param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, BENCHMARK_SASS_CONV_BIAS_INT8_NCHW4_DET_FIRST) {
- require_compute_capability(6, 1);
- std::string algo = ConvBias::algo_name<ConvBias::DirectParam>(
- "SASS_INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_128X32_64", ConvBias::DirectParam{});
- benchmark_target_algo(
- handle_cuda(), get_det_first_bench_args(16), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, algo.c_str(), param::ConvBias::Format::NCHW4);
- }
-
- TEST_F(CUDA, BENCHMARK_CUTLASS_CONV_BIAS_INT8_NCHW4_DET_FIRST) {
- require_compute_capability(6, 1);
- benchmark_target_algo(
- handle_cuda(), get_det_first_bench_args(16), dtype::QuantizedS8{1.2f},
- dtype::QuantizedS8{1.3f}, dtype::QuantizedS32{1.2f * 1.3f},
- dtype::QuantizedS8{1.0f}, "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_16",
- param::ConvBias::Format::NCHW4);
- }
-
- #endif
- } // namespace conv
- } // namespace test
- } // namespace megdnn
-
- // vim: syntax=cpp.doxygen