
/**
 * \file dnn/test/aarch64/matrix_mul.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "test/aarch64/fixture.h"

#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/matrix_mul.h"
#include "test/common/rng.h"
#include "test/arm_common/cpuinfo_help.h"

using namespace megdnn;
using namespace test;

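// Correctness tests: each case below runs matrix_mul::check_matrix_mul against
// one specific AArch64 MatrixMul algorithm, selected by its algorithm-name string.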
TEST_F(AARCH64, MATRIX_MUL_FP32K8X12) {
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}

#if MGB_ENABLE_CPUINFO
TEST_F(AARCH64, MATRIX_MUL_FP32K8X12_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}

TEST_F(AARCH64, MATRIX_MUL_FP32K8X12_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, handle(),
            "AARCH64_F32K8X12X1");
}
#endif

TEST_F(AARCH64, MATRIX_MUL_FP32K4X16) {
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, handle(),
            "AARCH64_F32K4X16X1");
}

TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}

#if MGB_ENABLE_CPUINFO
TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4_A53) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_FP32_PACK_MK4_A55) {
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_K8X12X1", param::MatrixMul::Format::MK4, 1);
}
#endif

TEST_F(AARCH64, MATRIX_MUL_FP32_MK4) {
    matrix_mul::check_matrix_mul(
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{}, handle(),
            "AARCH64_F32_MK4_4x16", param::MatrixMul::Format::MK4, 1);
}

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(AARCH64, MATRIX_MUL_F16_K8X24X1) {
    matrix_mul::check_matrix_mul(dtype::Float16{}, dtype::Float16{},
            dtype::Float16{}, handle(),
            "AARCH64_F16_K8X24X1");
}

TEST_F(AARCH64, MATRIX_MUL_F16_MK8) {
    matrix_mul::check_matrix_mul(
            dtype::Float16{}, dtype::Float16{}, dtype::Float16{}, handle(),
            "AARCH64_F16_MK8_8X8", param::MatrixMul::Format::MK8, 1);
}
#endif

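// Int8 correctness tests: the dot-product (SDOT) kernels are tested only when
// the build defines __ARM_FEATURE_DOTPROD; otherwise the plain NEON int8
// kernels in the #else branch are tested instead.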
#if __ARM_FEATURE_DOTPROD
TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_K8X12X4_DOTPROD) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
            handle(), "AARCH64_INT8X8X32_K8X12X4_DOTPROD");
}

TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_MK4_8X12X4_DOTPROD) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 6, 7, 10, 11})
        for (size_t n : {2, 3, 4, 5, 8, 12, 13, 14, 15, 16, 31})
            for (size_t k : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 33, 34})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(
            dtype::Int8{}, dtype::Int8{}, dtype::Int32{}, handle(),
            "AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD",
            param::MatrixMul::Format::MK4_DOT, 1, 1e-3, std::move(args));
}
#else
TEST_F(AARCH64, MATRIX_MUL_INT8X8X32_K4X4X16) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
            handle(), "AARCH64_INT8X8X32_K4X4X16");
}

TEST_F(AARCH64, MATRIX_MUL_INT8_MK4) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 7, 10, 11})
        for (size_t n : {1, 2, 3, 4, 5, 8, 16, 24, 25, 32})
            for (size_t k : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 33, 34})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
            handle(), "AARCH64_INT8X8X32_MK4_4X4X16",
            param::MatrixMul::Format::MK4, 1, 1e-3,
            std::move(args));
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_MK4) {
    std::vector<matrix_mul::TestArg> args;
    for (size_t m : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17})
        for (size_t n :
                {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 24})
            for (size_t k :
                    {2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
                     16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29})
                args.emplace_back(m, n, k, 0);
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
            handle(), "AARCH64_INT8X8X16_MK4_K8X8X8",
            param::MatrixMul::Format::MK4, 1, 1e-3,
            std::move(args));
}

TEST_F(AARCH64, MATRIX_MUL_MK4_8x8x16_4x4) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
            handle(), "AARCH64_INT8X8X16_MK4_4X4X8",
            param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_MK4_8x8x16) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
            handle(), "AARCH64_INT8X8X16_MK4_16X12X4",
            param::MatrixMul::Format::MK4, 1);
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x32_K8x8x8) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
            handle(), "AARCH64_INT8X8X32_K8X8X8");
}
#endif

TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_K8x8x8) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
            handle(), "AARCH64_INT8X8X16_K8X8X8");
}

TEST_F(AARCH64, MATRIX_MUL_INT8x8x16_K4x4x16) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
            handle(), "AARCH64_INT8X8X16_K4X4X16");
}

TEST_F(AARCH64, MATRIX_MUL_INT16x16x32_K12X8X1) {
    matrix_mul::check_matrix_mul(dtype::Int16{}, dtype::Int16{}, dtype::Int32{},
            handle(), "AARCH64_INT16X16X32_K12X8X1");
}

TEST_F(AARCH64, MATRIX_MUL_INT16x16x32_MK8) {
    matrix_mul::check_matrix_mul(dtype::Int16{}, dtype::Int16{}, dtype::Int32{},
            handle(), "AARCH64_INT16X16X32_MK8_8X8",
            param::MatrixMul::Format::MK8, 1);
}

//! FIXME: need to add tests of GEMV and QUINT8

#if MEGDNN_WITH_BENCHMARK
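// Benchmarks (built only with MEGDNN_WITH_BENCHMARK): each case below times one
// or more algorithm/format variants on the same shapes and prints per-run time,
// throughput in Gflops and, where two variants are compared, a speedup ratio.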
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_K4X16(handle());
    Benchmarker<MatrixMul> benchmarker_K12X8(handle());
    benchmarker_K4X16.set_times(RUNS)
            .set_dtype(0, dtype::Float32{})
            .set_dtype(1, dtype::Float32{})
            .set_dtype(2, dtype::Float32{})
            .set_param(param)
            .set_display(false);
    benchmarker_K4X16.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_K12X8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    benchmarker_K12X8.set_times(RUNS)
            .set_dtype(0, dtype::Float32{})
            .set_dtype(1, dtype::Float32{})
            .set_dtype(2, dtype::Float32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto k4x16_used = benchmarker_K4X16.exec({A, B, {}}) / RUNS;
        auto k12x8_used = benchmarker_K12X8.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} k4x16: %f ms %f Gflops k12x8: %f "
               "ms "
               "%f Gflops k4x16_vs_k12x8: %f\n",
               M, K, N, k4x16_used, computations / k4x16_used, k12x8_used,
               computations / k12x8_used, k12x8_used / k4x16_used);
    };
    run(256, 256, 128);
    run(384, 384, 384);
    for (size_t k = 4; k <= 256; k *= 8) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
            printf("\n");
        }
        printf("\n");
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16_8X8X8) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    Benchmarker<MatrixMul> benchmarker_int32(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_int.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K8X8X8"));
    benchmarker_int32.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K8X8X8"));
    benchmarker_int32.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        auto int32_used = benchmarker_int32.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup_vs_fp32: %f, speedup_vs_int32: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used,
               int32_used / int_used);
    };
    run(256, 256, 256);
    for (size_t k = 4; k <= 256; k *= 8) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT32_MK_4X4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_MK4_4X4X16"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used = benchmarker_mk4.exec(
                {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup_vs_normal: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used);
    };
    run(256, 256, 128);
    for (size_t k = 4; k <= 512; k *= 2) {
        for (size_t m = 4; m <= 512; m *= 2) {
            for (size_t n = 4; n <= 512; n *= 2) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_MK4_8x8x16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    Benchmarker<MatrixMul> benchmarker_mk4_16x12(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_4X4X8"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_mk4_16x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_16X12X4"));
    benchmarker_mk4_16x12.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used = benchmarker_mk4.exec(
                {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        auto mk4_16x12_used =
                benchmarker_mk4_16x12.exec(
                        {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup: %f, mk4_16x12 %f Gflops speedup: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used,
               computations / mk4_16x12_used, default_used / mk4_16x12_used);
    };
    run(384, 384, 384);
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_MK4_8x8x8_8x8x16_vs_4x4x16_8x8x16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    Benchmarker<MatrixMul> benchmarker_mk4_4x4x8(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    param.format = MatrixMul::Param::Format::MK4;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_K8X8X8"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_mk4_4x4x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_MK4_4X4X8"));
    benchmarker_mk4_4x4x8.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used = benchmarker_mk4.exec(
                {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        auto mk4_4x4x8_used =
                benchmarker_mk4_4x4x8.exec(
                        {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup: %f, mk4_4x4x8 %f Gflops %f ms speedup: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used,
               computations / mk4_4x4x8_used, mk4_4x4x8_used,
               mk4_4x4x8_used / mk_used);
    };
    run(384, 384, 384);
    run(512, 512, 512);
    run(1024, 1024, 384);
    run(256, 256, 384);
    for (int m = 32; m <= 512; m *= 2)
        for (int n = 32; n <= 512; n *= 2)
            for (int k = 32; k < 512; k *= 2) {
                run(m, n, k);
            }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16_4X4X16) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    Benchmarker<MatrixMul> benchmarker_int32(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_param(param)
            .set_display(false);
    benchmarker_int.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X16_K4X4X16"));
    benchmarker_int32.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K4X4X16"));
    benchmarker_int32.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        auto int32_used = benchmarker_int32.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup_vs_fp32: %f, speedup_vs_int32: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used,
               int32_used / int_used);
    };
    run(256, 256, 128);
    run(256, 256, 256);
    for (size_t k = 4; k <= 256; k *= 4) {
        for (size_t m = 4; m <= 256; m *= 4) {
            for (size_t n = 4; n <= 256; n *= 4) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}

TEST_F(AARCH64, BENCHMARK_GEMV) {
    int exec_times = 10;
    Benchmarker<MatrixMul> benchmarker_gemm(handle());
    benchmarker_gemm.set_times(exec_times);
    float mod = 1000 * exec_times / 1e9;
    auto run = [&](size_t M, size_t K, size_t N) {
        float time = 1.f, perf = 1.f;
        std::cout << "GEMM: (" << M << ", " << K << ", " << N << ")"
                  << std::endl;
        benchmarker_gemm.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32());
        time = benchmarker_gemm.exec({{M, K}, {K, N}, {}});
        perf = 2.f * M * K * N / time * mod;
        std::cout << "gemm fp32, Performance is " << perf << " Gflops"
                  << std::endl;
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        benchmarker_gemm.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16());
        time = benchmarker_gemm.exec({{M, K}, {K, N}, {}});
        perf = 2.f * M * K * N / time * mod;
        std::cout << "gemm fp16, Performance is " << perf << " Gflops"
                  << std::endl;
#endif
    };
    std::cout << "warm up:\n";
    for (int i = 0; i < 50; i++) {
        benchmarker_gemm.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_display(false)
                .exec({{256, 256}, {256, 256}, {}});
        benchmarker_gemm.set_display(true);
    }
    // run gemv
    for (size_t M : {1, 2, 3, 4, 5, 6, 7, 8, 64, 256})
        for (size_t K : {1, 2, 3, 4, 5, 6, 7, 8, 64, 256})
            for (size_t N : {112})
                run(M, K, N);
}

#if __ARM_FEATURE_DOTPROD
TEST_F(AARCH64, BENCHMARK_TRANSPOSED_MATRIX_MUL_INT_8X8X32) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = true;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, {})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_param(param).set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto int_used = benchmarker_int.exec({{K, M}, {N, K}, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({{K, M}, {N, K}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_GEMV_INT_8X8X32) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, {})
            .set_param(param)
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto int_used = benchmarker_int.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({{M, K}, {K, N}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, float_used, computations / float_used, int_used,
               computations / int_used, float_used / int_used);
    };
    for (size_t M : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
        for (size_t N : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
            for (size_t K : {1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 256})
                run(M, N, K);
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT8X8X32_MK4_8X12X4) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = false;
    param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker(handle());
    Benchmarker<MatrixMul> benchmarker_mk4(handle());
    benchmarker.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    benchmarker.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_K8X12X4"));
    param.format = MatrixMul::Param::Format::MK4_DOT;
    benchmarker_mk4.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD"));
    benchmarker_mk4.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_param(param)
            .set_display(false);
    auto run = [&](size_t M, size_t N, size_t K) {
        auto default_used = benchmarker.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto mk_used = benchmarker_mk4.exec(
                {{M / 4, K / 4, 4, 4}, {K / 4, N, 4}, {}}) /
                RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} normal: %f ms %f Gflops mk4: %f ms "
               "%f Gflops speedup_vs_normal: %f\n",
               M, K, N, default_used, computations / default_used, mk_used,
               computations / mk_used, default_used / mk_used);
    };
    run(256, 256, 128);
    for (size_t k = 4; k <= 512; k *= 2) {
        for (size_t m = 4; m <= 512; m *= 2) {
            for (size_t n = 4; n <= 512; n *= 2) {
                run(m, n, k);
            }
        }
        std::cout << std::endl;
    }
}
#endif  // __ARM_FEATURE_DOTPROD

#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_F16_MK8) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(8);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float16{}, dtype::Float16{},
            dtype::Float16{}, "AARCH64_F16_MK8_8X8",
            param::MatrixMul::Format::MK8, dtype::Float16{}, dtype::Float16{},
            dtype::Float16{}, "AARCH64_F16_K8X24X1");
}
#endif

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16x16x32) {
    constexpr size_t RUNS = 50;
    Benchmarker<MatrixMul> benchmarker_int(handle());
    benchmarker_int.set_times(RUNS)
            .set_dtype(0, dtype::Int16{})
            .set_dtype(1, dtype::Int16{})
            .set_dtype(2, dtype::Int32{})
            .set_display(false);
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_display(false).set_times(RUNS);
    auto run = [&](size_t M, size_t N, size_t K, int mask) {
        param::MatrixMul param;
        param.transposeA = mask & 0x1;
        param.transposeB = mask & 0x2;
        benchmarker_int.set_param(param);
        benchmarker_float.set_param(param);
        TensorShape A, B;
        if (param.transposeA) {
            A = TensorShape{K, M};
        } else {
            A = TensorShape{M, K};
        }
        if (param.transposeB) {
            B = TensorShape{N, K};
        } else {
            B = TensorShape{K, N};
        }
        auto int_used = benchmarker_int.exec({A, B, {}}) / RUNS;
        auto float_used = benchmarker_float.exec({A, B, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N} %d{TA} %d{TB}} "
               "float: %f ms %f Gflops int: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, param.transposeA, param.transposeB, float_used,
               computations / float_used, int_used, computations / int_used,
               float_used / int_used);
    };
    constexpr int mask = 4;
    for (auto i = 0; i < mask; i++) {
        for (size_t M : {8, 64, 112, 256}) {
            for (size_t K : {8, 64, 112, 256}) {
                for (size_t N : {8, 64, 112, 256}) {
                    run(M, N, K, i);
                }
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_MK4) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(16);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, "AARCH64_F32_MK4_4x16",
            param::MatrixMul::Format::MK4, dtype::Float32{}, dtype::Float32{},
            dtype::Float32{});
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_PACK_MK4) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(16);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, "AARCH64_F32_MK4_K8X12X1",
            param::MatrixMul::Format::MK4, dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, "AARCH64_F32K8X12X1");
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_INT16x16x32_MK8) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(8);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Int16{}, dtype::Int16{}, dtype::Int32{},
            "AARCH64_INT16X16X32_MK8_8X8", param::MatrixMul::Format::MK8,
            dtype::Int16{}, dtype::Int16{}, dtype::Int32{});
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K8X12) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = true;
    Benchmarker<MatrixMul> benchmarker_k12x8(handle());
    Benchmarker<MatrixMul> benchmarker_k8x12(handle());
    benchmarker_k12x8.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k8x12.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k12x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_k8x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    auto run = [&](size_t M, size_t N, size_t K) {
        auto k12x8_used = benchmarker_k12x8.exec({{K, M}, {N, K}, {}}) / RUNS;
        auto k8x12_used = benchmarker_k8x12.exec({{K, M}, {N, K}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float k12x8: %f ms %f Gflops "
               "k8x12: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, k12x8_used, computations / k12x8_used, k8x12_used,
               computations / k8x12_used, k12x8_used / k8x12_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}

TEST_F(AARCH64, BENCHMARK_MATRIX_MUL_FP32_K8X12_NO_TRANS) {
    constexpr size_t RUNS = 50;
    param::MatrixMul param;
    param.transposeA = param.transposeB = false;
    Benchmarker<MatrixMul> benchmarker_k12x8(handle());
    Benchmarker<MatrixMul> benchmarker_k8x12(handle());
    benchmarker_k12x8.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k8x12.set_param(param).set_display(false).set_times(RUNS);
    benchmarker_k12x8.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K4X16X1"));
    benchmarker_k8x12.set_before_exec_callback(
            AlgoChecker<MatrixMul>("AARCH64_F32K8X12X1"));
    auto run = [&](size_t M, size_t N, size_t K) {
        auto k12x8_used = benchmarker_k12x8.exec({{M, K}, {K, N}, {}}) / RUNS;
        auto k8x12_used = benchmarker_k8x12.exec({{M, K}, {K, N}, {}}) / RUNS;
        float computations = 2.f * M * K * N * 1e-6;
        printf("run: {%zu{M} %zu{K} %zu{N}} float k12x8: %f ms %f Gflops "
               "k8x12: %f ms "
               "%f Gflops speedup: %f\n",
               M, K, N, k12x8_used, computations / k12x8_used, k8x12_used,
               computations / k8x12_used, k12x8_used / k8x12_used);
    };
    run(256, 12 * 24, 256);
    for (size_t M : {8, 64, 112, 256}) {
        for (size_t K : {8, 64, 112, 256}) {
            for (size_t N : {8, 64, 112, 256}) {
                run(M, N, K);
            }
        }
    }
}
#endif  // MEGDNN_WITH_BENCHMARK

// vim: syntax=cpp.doxygen

The MegEngine package already bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose from. To run GPU programs, make sure the machine has a GPU device and that its driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.