/**
 * \file dnn/test/aarch64/matrix_mul.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "test/aarch64/fixture.h"

#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/matrix_mul.h"
#include "test/common/rng.h"

#include "test/arm_common/cpuinfo_help.h"

using namespace megdnn;
using namespace test;
TEST_F(AARCH64, MATRIX_MUL_FP32K8X12) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}

#if MGB_ENABLE_CPUINFO
TEST_F(AARCH64, MATRIX_MUL_FP32K8X12_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}

TEST_F(AARCH64, MATRIX_MUL_FP32K8X12_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}
#endif

TEST_F(AARCH64, MATRIX_MUL_FP32K4X16) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32K4X16X1");
}

TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}

#if MGB_ENABLE_CPUINFO
TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}
#endif

TEST_F(AARCH64, MATRIX_MUL_FP32_MK4) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_4x16", param::MatrixMul::Format::MK4, 1);
}
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(AARCH64, MATRIX_MUL_F16_K8X24X1) {
    matrix_mul::check_matrix_mul(
            dtype::Float16{}, dtype::Float16{}, dtype::Float16{}, handle(),
            "AARCH64_F16_K8X24X1");
}

TEST_F(AARCH64, MATRIX_MUL_F16_MK8) {
    matrix_mul::check_matrix_mul(
            dtype::Float16{}, dtype::Float16{}, dtype::Float16{}, handle(),
            "AARCH64_F16_MK8_8X8", param::MatrixMul::Format::MK8, 1);
}
#endif
#if MGB_ENABLE_DOT
TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_K8X12X4_DOTPROD) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_K8X12X4_DOTPROD");
}

TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_MK4_8X12X4_DOTPROD) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 6, 7, 10, 11})
        for (size_t n : {2, 3, 4, 5, 8, 12, 13, 14, 15, 16, 31})
            for (size_t k : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 33, 34})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD", param::MatrixMul::Format::MK4_DOT,
            1, 1e-3, std::move(args));
}
#else
TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_K4X4X16) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_K4X4X16");
}

TEST_F(AARCH64, MATRIX_MUL_INT8_MK4) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 7, 10, 11})
        for (size_t n : {1, 2, 3, 4, 5, 8, 16, 24, 25, 32})
            for (size_t k : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 33, 34})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_MK4_4X4X16", param::MatrixMul::Format::MK4, 1, 1e-3,
            std::move(args));
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_MK4) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17})
        for (size_t n : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24})
            for (size_t k : {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                             16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, handle(),
            "AARCH64_INT8X8X16_MK4_K8X8X8", param::MatrixMul::Format::MK4, 1, 1e-3,
            std::move(args));
}

TEST_F(AARCH64, MATRIX_MUL_MK4_8x8x16_4x4) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, handle(),
            "AARCH64_INT8X8X16_MK4_4X4X8", param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_MK4_8x8x16) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, handle(),
            "AARCH64_INT8X8X16_MK4_16X12X4", param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x32_K8x8x8) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_K8X8X8");
}
#endif
TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_K8x8x8) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, handle(),
            "AARCH64_INT8X8X16_K8X8X8");
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_K4x4x16) {
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int16{}, handle(),
            "AARCH64_INT8X8X16_K4X4X16");
}

TEST_F(AARCH64, MATRIX_MUL_INT4x4x16_K8x8x8_QUANTIZEDS4) {
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Checker<MatrixMul> checker(handle());
    checker.set_dtype(0, dtype::QuantizedS4{0.6})
            .set_dtype(1, dtype::QuantizedS4{0.5})
            .set_dtype(2, dtype::QuantizedS16{0.6 * 0.5})
            .set_param(param);
    checker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT4X4X16_K8X8X8"));
    auto run = [&](size_t M, size_t N, size_t K) {
        printf("M N K %zu %zu %zu \n", M, N, K);
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        checker.exec({A, B, {}});
    };
    for (size_t m : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 16, 20})
        for (size_t n : {2, 4, 6, 8, 10, 12, 14, 16, 24})
            for (size_t k : {2, 4, 6, 8, 10, 12, 14, 16, 32})
                run(m, n, k);
    for (size_t k = 4; k <= 256; k *= 8) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
        }
    }
    param.transposeA = true;
    run(8, 8, 8);
    run(16, 8, 16);
    param.transposeB = true;
    run(8, 8, 8);
    run(16, 16, 16);
}

TEST_F(AARCH64, MATRIX_MUL_INT16x16x32_K12X8X1) {
    matrix_mul::check_matrix_mul(
            dtype::Int16{}, dtype::Int16{}, dtype::Int32{}, handle(),
            "AARCH64_INT16X16X32_K12X8X1");
}

TEST_F(AARCH64, MATRIX_MUL_INT16x16x32_MK8) {
    matrix_mul::check_matrix_mul(
            dtype::Int16{}, dtype::Int16{}, dtype::Int32{}, handle(),
            "AARCH64_INT16X16X32_MK8_8X8", param::MatrixMul::Format::MK8, 1);
}

//! FIXME: need to add tests of GEMV and QUINT8
#if MEGDNN_WITH_BENCHMARK
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_K4X16(handle());
    Benchmarker<MatrixMul> benchmarker_K12X8(handle());
    benchmarker_K4X16.set_times(RUNS)
            .set_dtype(0, dtype::Float32{})
            .set_dtype(1, dtype::Float32{})
            .set_dtype(2, dtype::Float32{})
            .set_param(param)
            .set_display(false);
    benchmarker_K4X16.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_K12X8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    benchmarker_K12X8.set_times(RUNS)
            .set_dtype(0, dtype::Float32{})
            .set_dtype(1, dtype::Float32{})
            .set_dtype(2, dtype::Float32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto k4x16_used = benchmarker_K4X16.exec({A, B, {}}) / RUNS;
        auto k12x8_used = benchmarker_K12X8.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} k4x16: %f ms %f Gflops k12x8: %f "
               "ms "
               "%f Gflops k4x16_vs_k12x8: %f\n",
               M, K, N, k4x16_used, computations / k4x16_used, k12x8_used,
               computations / k12x8_used, k12x8_used / k4x16_used);
    };
    run(256, 256, 128);
    run(384, 384, 384);
    for (size_t k = 4; k <= 256; k *= 8) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
            printf("\n");
        }
        printf("\n");
    }
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16_8X8X8) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    Benchmarker<MatrixMul> benchmarker_int32(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_int.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K8X8X8"));
    benchmarker_int32.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K8X8X8"));
    benchmarker_int32.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        auto int32_used = benchmarker_int32.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup_vs_fp32: %f, speedup_vs_int32: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used, int32_used / int_used);
    };
    run(256, 256, 256);
    for (size_t k = 4; k <= 256; k *= 8) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
    }
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT32_MK_4X4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_MK4_4X4X16"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used =
                benchmarker_mk4.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup_vs_normal: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used);
    };
    run(256, 256, 128);
    for (size_t k = 4; k <= 512; k *= 2) {
        for (size_t m = 4; m <= 512; m *= 2) {
            for (size_t n = 4; n <= 512; n *= 2) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_MK4_8x8x16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    Benchmarker<MatrixMul> benchmarker_mk4_16x12(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_4X4X8"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_mk4_16x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_16X12X4"));
    benchmarker_mk4_16x12.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used =
                benchmarker_mk4.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) / RUNS;
        auto mk4_16x12_used =
                benchmarker_mk4_16x12.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup: %f, mk4_16x12 %f Gflops speedup: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used,
               computations / mk4_16x12_used, default_used / mk4_16x12_used);
    };
    run(384, 384, 384);
}
TEST_F(AARCH64, BENCHMARK_4x4x16_vs_8x8x16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_int4_4x4x16(handle());
    benchmarker_int4_4x4x16.set_times(RUNS)
            .set_dtype(0, dtype::QuantizedS4{0.3})
            .set_dtype(1, dtype::QuantizedS4{0.3})
            .set_dtype(2, dtype::QuantizedS16{0.09})
            .set_param(param)
            .set_display(false);
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto int4416_used = benchmarker_int4_4x4x16.exec({{M, K}, {K, N}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal 8x8x16 used: %f ms %f "
               "Gflops int4416 used %f int4416_gflops %f speedup %f\n",
               M, K, N, default_used, computations / default_used, int4416_used,
               computations / int4416_used, default_used / int4416_used);
    };
    for (int m = 32; m <= 1024; m += 32)
        for (int n = 32; n <= 1024; n += 32)
            for (int k = 32; k <= 512; k += 32)
                run(m, n, k);
    run(32, 32, 32);
    run(32, 32, 8);
    run(32, 32, 16);
    run(32, 32, 24);
    run(32 * 2, 32 * 2, 32);
    run(32 * 4, 32 * 4, 32);
    run(32 * 6, 32 * 6, 32);
    run(32 * 8, 32 * 8, 32);
    run(32 * 2, 32 * 2, 32 * 2);
    run(32 * 4, 32 * 4, 32 * 3);
    run(32 * 6, 32 * 6, 32 * 4);
    run(32 * 8, 32 * 8, 32 * 5);
    run(32 * 10, 32 * 10, 32 * 10);
    run(384, 384, 384);
    run(256, 256, 384);
    run(512, 512, 384);
    run(1024, 1024, 384);
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_MK4_8x8x8_8x8x16_vs_4x4x16_8x8x16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    Benchmarker<MatrixMul> benchmarker_mk4_4x4x8(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_K8X8X8"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_mk4_4x4x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_4X4X8"));
    benchmarker_mk4_4x4x8.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used =
                benchmarker_mk4.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) / RUNS;
        auto mk4_4x4x8_used =
                benchmarker_mk4_4x4x8.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup: %f, mk4_4x4x8 %f Gflops %f ms speedup: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used,
               computations / mk4_4x4x8_used, mk4_4x4x8_used, mk4_4x4x8_used / mk_used);
    };
    run(384, 384, 384);
    run(512, 512, 512);
    run(1024, 1024, 384);
    run(256, 256, 384);
    for (int m = 32; m <= 512; m *= 2)
        for (int n = 32; n <= 512; n *= 2)
            for (int k = 32; k < 512; k *= 2) {
                run(m, n, k);
            }
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16_4X4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    Benchmarker<MatrixMul> benchmarker_int32(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_int.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    benchmarker_int32.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K4X4X16"));
    benchmarker_int32.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        auto int32_used = benchmarker_int32.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup_vs_fp32: %f, speedup_vs_int32: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used, int32_used / int_used);
    };
    run(256, 256, 128);
    run(256, 256, 256);
    for (size_t k = 4; k <= 256; k *= 4) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}
TEST_F(AARCH64, BENCHMARK_GEMV) {
    int exec_times = 10;
    Benchmarker<MatrixMul> benchmarker_gemm(handle());
    benchmarker_gemm.set_times(exec_times);
    float mod = 1000 * exec_times / 1e9;
    auto run = [&](size_t M, size_t K, size_t N) {
        float time = 1.f, perf = 1.f;
        std::cout << "GEMM: (" << M << ", " << K << ", " << N << ")" << std::endl;
        benchmarker_gemm.set_dtype(0, dtype::Float32()).set_dtype(1, dtype::Float32());
        time = benchmarker_gemm.exec({{M, K}, {K, N}, {}});
        perf = 2.f * M * K * N / time * mod;
        std::cout << "gemm fp32, Performance is " << perf << " Gflops" << std::endl;
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        benchmarker_gemm.set_dtype(0, dtype::Float16()).set_dtype(1, dtype::Float16());
        time = benchmarker_gemm.exec({{M, K}, {K, N}, {}});
        perf = 2.f * M * K * N / time * mod;
        std::cout << "gemm fp16, Performance is " << perf << " Gflops" << std::endl;
#endif
    };
    std::cout << "warm up:\n";
    for (int i = 0; i < 50; i++) {
        benchmarker_gemm.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_display(false)
                .exec({{256, 256}, {256, 256}, {}});
        benchmarker_gemm.set_display(true);
    }
    // run gemv
    for (size_t M : {1, 2, 3, 4, 5, 6, 7, 8, 64, 256})
        for (size_t K : {1, 2, 3, 4, 5, 6, 7, 8, 64, 256})
            for (size_t N : {112})
                run(M, K, N);
}
#if MGB_ENABLE_DOT
TEST_F(AARCH64, BENCHMARK_TRANSPOSED_MATRIX_MUL_INT_8X8X32) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = true;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, {})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto int_used = benchmarker_int.exec({{K, M}, {N, K}, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({{K, M}, {N, K}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_GEMV_INT_8X8X32) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, {})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto int_used = benchmarker_int.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({{M, K}, {K, N}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used);
    };
    for (size_t M : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
        for (size_t N : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
            for (size_t K : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
                run(M, N, K);
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT8X8X32_MK4_8X12X4) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K8X12X4"));
    param.format = MatrixMul::Param::Format::MK4_DOT;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used =
                benchmarker_mk4.exec({{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup_vs_normal: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used);
    };
    run(256, 256, 128);
    for (size_t k = 4; k <= 512; k *= 2) {
        for (size_t m = 4; m <= 512; m *= 2) {
            for (size_t n = 4; n <= 512; n *= 2) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}
#endif  // MGB_ENABLE_DOT
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_F16_MK8) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(8);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float16{}, dtype::Float16{}, dtype::Float16{},
            "AARCH64_F16_MK8_8X8", param::MatrixMul::Format::MK8, dtype::Float16{},
            dtype::Float16{}, dtype::Float16{}, "AARCH64_F16_K8X24X1");
}
#endif

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16x16x32) {
    constexpr size_t RUNS = 50;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int16{})
            .set_dtype(1, dtype::Int16{})
            .set_dtype(2, dtype::Int32{})
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K, int mask) {
        param::MatrixMul param;
        param.transposeA = mask & 0x1;
        param.transposeB = mask & 0x2;
        benchmarker_int.set_param(param);
        benchmarker_float.set_param(param);
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N} %d{TA} %d{TB}} "
               "float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, param.transposeA, param.transposeB, float_used,
               computations / float_used, int_used, computations / int_used,
               float_used / int_used);
    };
    constexpr int mask = 4;
    for (auto i = 0; i < mask; i++) {
        for (size_t M : {8, 64, 112, 256}) {
            for (size_t K : {8, 64, 112, 256}) {
                for (size_t N : {8, 64, 112, 256}) {
                    run(M, N, K, i);
                }
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_MK4) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(16);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float32{}, dtype::Float32{}, dtype::Float32{},
            "AARCH64_F32_MK4_4x16", param::MatrixMul::Format::MK4, dtype::Float32{},
            dtype::Float32{}, dtype::Float32{});
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_PACK_MK4) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(16);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float32{}, dtype::Float32{}, dtype::Float32{},
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, dtype::Float32{},
            dtype::Float32{}, dtype::Float32{}, "AARCH64_F32K8X12X1");
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16x16x32_MK8) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(8);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Int16{}, dtype::Int16{}, dtype::Int32{},
            "AARCH64_INT16X16X32_MK8_8X8", param::MatrixMul::Format::MK8,
            dtype::Int16{}, dtype::Int16{}, dtype::Int32{});
}
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K8X12) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = true;
    Benchmarker<MatrixMul> benchmarker_k12x8(handle());
    Benchmarker<MatrixMul> benchmarker_k8x12(handle());
    benchmarker_k12x8.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k8x12.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k12x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_k8x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    auto run = [&](size_t M, size_t N, size_t K) {
        auto k12x8_used = benchmarker_k12x8.exec({{K, M}, {N, K}, {}}) / RUNS;
        auto k8x12_used = benchmarker_k8x12.exec({{K, M}, {N, K}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float k12x8: %f ms %f Gflops "
               "k8x12: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, k12x8_used, computations / k12x8_used, k8x12_used,
               computations / k8x12_used, k12x8_used / k8x12_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K8X12_NO_TRANS) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_k12x8(handle());
    Benchmarker<MatrixMul> benchmarker_k8x12(handle());
    benchmarker_k12x8.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k8x12.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k12x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_k8x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    auto run = [&](size_t M, size_t N, size_t K) {
        auto k12x8_used = benchmarker_k12x8.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto k8x12_used = benchmarker_k8x12.exec({{M, K}, {K, N}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float k12x8: %f ms %f Gflops "
               "k8x12: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, k12x8_used, computations / k12x8_used, k8x12_used,
               computations / k8x12_used, k12x8_used / k8x12_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}
#endif  // MEGDNN_WITH_BENCHMARK

// vim: syntax=cpp.doxygen