matrix_mul.cpp
/**
 * \file dnn/test/x86/matrix_mul.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */
#include "test/x86/fixture.h"

#include "src/x86/utils.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/matrix_mul.h"
#include "test/common/rng.h"

using namespace megdnn;
using namespace test;
using namespace megdnn::x86;
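
// Correctness tests: each case feeds inputs of the stated dtypes through one
// named x86 matmul algorithm via matrix_mul::check_matrix_mul and verifies
// its output on the test handle.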
#if MEGDNN_X86_WITH_VNNI
TEST_F(X86, MATRIX_MUL_VNNI_8X8X32) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
                                 handle(), "X86_INT8X8X32_VNNI");
}
#endif

#if MEGDNN_X86_WITH_MKL_DNN
TEST_F(X86, MATRIX_MUL_MKLDNN_8X8X32) {
    if (is_supported(SIMDType::VNNI)) {
        matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{},
                                     dtype::Int32{}, handle(),
                                     "X86_INT8X8X32_MKLDNN");
    } else {
        std::cout << "cannot check mkldnn matmul: no VNNI support"
                  << std::endl;
        matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{},
                                     dtype::Int32{}, handle());
    }
}
#endif

//! FIXME: need to add tests of GEMV and QUINT8
TEST_F(X86, MATRIX_MUL_AVX2_8X8X32) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
                                 handle(), "X86_INT8X8X32_AVX2_2X4X16");
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
                                 handle(), "X86_INT8X8X32_AVX2_4X16X2");
}

TEST_F(X86, MATRIX_MUL_AVX2_8X8X16) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
                                 handle(), "X86_INT8X8X16_AVX2");
}

TEST_F(X86, MATRIX_MUL_SSE_8X8X16) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int16{},
                                 handle(), "X86_INT8X8X16_SSE");
}

TEST_F(X86, MATRIX_MUL_SSE_8X8X32) {
    matrix_mul::check_matrix_mul(dtype::Int8{}, dtype::Int8{}, dtype::Int32{},
                                 handle(), "X86_INT8X8X32_SSE_4X8X2");
}

#if MEGDNN_X86_WITH_MKL && SUPPORT_MKL_PACKED_GEMM
TEST_F(X86, MATRIX_MUL_MKL_PACKA) {
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
                                 dtype::Float32{}, handle(),
                                 "X86_F32_MKL_PACKA");
}
#endif

TEST_F(X86, MATRIX_MUL_AVX2_MK8_8X8) {
    matrix_mul::check_matrix_mul(dtype::Float32{}, dtype::Float32{},
                                 dtype::Float32{}, handle(), "X86_F32MK8_8X8",
                                 param::MatrixMul::Format::MK8, 1);
}
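
// Benchmarks: each int8 kernel is timed against the X86_F32_BLAS float
// baseline on identical shapes, reporting time, Gflop/s, and speed-up.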
#if MEGDNN_WITH_BENCHMARK
TEST_F(X86, BENCHMARK_MATRIX_MUL_AVX2_MK8_8X8) {
    auto args = matrix_mul::get_benchmark_matmul_mk_packed_args(8);
    matrix_mul::benchmark_with_contrast(
            handle(), args, dtype::Float32{}, dtype::Float32{},
            dtype::Float32{}, "X86_F32MK8_8X8", param::MatrixMul::Format::MK8,
            dtype::Float32{}, dtype::Float32{}, dtype::Float32{},
            "X86_F32_BLAS");
}

TEST_F(X86, BENCHMARK_MATRIX_MUL_8X8X32) {
    constexpr size_t RUNS = 50;
    auto rng = std::make_unique<UniformIntRNG>(-127, 127);
#if MEGDNN_X86_WITH_VNNI
    Benchmarker<MatrixMul> benchmarker_vnni(handle());
    benchmarker_vnni.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_display(false)
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_vnni.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X32_VNNI"));
#endif
#if MEGDNN_X86_WITH_MKL_DNN
    Benchmarker<MatrixMul> benchmarker_mkldnn(handle());
    benchmarker_mkldnn.set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_display(false)
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_mkldnn.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X32_MKLDNN"));
#endif
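    // Each benchmarker below pins one kernel via AlgoChecker, so exec()
    // measures that specific algorithm rather than the heuristic choice.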
    Benchmarker<MatrixMul> benchmarker_avx2_4x16x2(handle());
    benchmarker_avx2_4x16x2.set_display(false)
            .set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_avx2_4x16x2.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X32_AVX2_4X16X2"));
    Benchmarker<MatrixMul> benchmarker_avx2_4x16x2_8816(handle());
    benchmarker_avx2_4x16x2_8816.set_display(false)
            .set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_avx2_4x16x2_8816.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X16_AVX2"));
    Benchmarker<MatrixMul> benchmarker_sse_4x8x2_8816(handle());
    benchmarker_sse_4x8x2_8816.set_display(false)
            .set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int16{})
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_sse_4x8x2_8816.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X16_SSE"));
    Benchmarker<MatrixMul> benchmarker_avx2_2x4x16(handle());
    benchmarker_avx2_2x4x16.set_display(false)
            .set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_avx2_2x4x16.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X32_AVX2_2X4X16"));
    Benchmarker<MatrixMul> benchmarker_sse_4x8x2(handle());
    benchmarker_sse_4x8x2.set_display(false)
            .set_times(RUNS)
            .set_dtype(0, dtype::Int8{})
            .set_dtype(1, dtype::Int8{})
            .set_dtype(2, dtype::Int32{})
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_sse_4x8x2.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_INT8X8X32_SSE_4X8X2"));
    Benchmarker<MatrixMul> benchmarker_float(handle());
    benchmarker_float.set_display(false)
            .set_times(RUNS)
            .set_rng(0, rng.get())
            .set_rng(1, rng.get());
    benchmarker_float.set_before_exec_callback(
            AlgoChecker<MatrixMul>("X86_F32_BLAS"));

    auto run = [&](size_t M, size_t N, size_t K) {
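        // A matmul does 2*M*N*K flops; the 1e-6 factor makes
        // `computations / time_in_ms` come out in Gflop/s.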
        const float computations = 2.f * M * K * N * 1e-6;
        std::cout << "run : {" << M << "," << N << "," << K << "} ";
        auto float_used = benchmarker_float.exec({{M, K}, {K, N}, {}}) / RUNS;
        std::cout << "float: " << float_used << " ms, "
                  << computations / float_used << " Gflops, ";
#if MEGDNN_X86_WITH_VNNI
        if (is_supported(SIMDType::VNNI)) {
            auto vnni_used = benchmarker_vnni.exec({{M, K}, {K, N}, {}}) / RUNS;
            std::cout << "vnni: " << vnni_used << " ms, "
                      << computations / vnni_used << " Gflops, "
                      << "speed_up " << float_used / vnni_used << ", ";
        }
#endif
#if MEGDNN_X86_WITH_MKL_DNN
        if (is_supported(SIMDType::VNNI)) {
            auto mkldnn_used =
                    benchmarker_mkldnn.exec({{M, K}, {K, N}, {}}) / RUNS;
            std::cout << "mkldnn: " << mkldnn_used << " ms, "
                      << computations / mkldnn_used << " Gflops, "
                      << "speed_up " << float_used / mkldnn_used << ", ";
        }
#endif
        if (is_supported(SIMDType::AVX2)) {
            auto avx2_used_4x16x2 =
                    benchmarker_avx2_4x16x2.exec({{M, K}, {K, N}, {}}) / RUNS;
            auto avx2_used_2x4x16 =
                    benchmarker_avx2_2x4x16.exec({{M, K}, {K, N}, {}}) / RUNS;
            std::cout << "avx2_k2: " << avx2_used_4x16x2
                      << " ms, k2 throughput "
                      << computations / avx2_used_4x16x2 << " Gflops, "
                      << "k2_speed_up " << float_used / avx2_used_4x16x2
                      << ", k16_speed_up " << float_used / avx2_used_2x4x16
                      << ",";
            auto avx2_used_4x16x2_8816 =
                    benchmarker_avx2_4x16x2_8816.exec({{M, K}, {K, N}, {}}) /
                    RUNS;
            std::cout << "avx2_8816: " << avx2_used_4x16x2_8816
                      << " ms, 8816 throughput "
                      << computations / avx2_used_4x16x2_8816 << " Gflops,";
        }
        if (is_supported(SIMDType::SSE4_1)) {
            auto sse_used =
                    benchmarker_sse_4x8x2.exec({{M, K}, {K, N}, {}}) / RUNS;
            std::cout << "sse: " << sse_used << " ms, "
                      << computations / sse_used << " Gflops, "
                      << "speed_up " << float_used / sse_used << ", ";
            auto sse_used_8816 =
                    benchmarker_sse_4x8x2_8816.exec({{M, K}, {K, N}, {}}) /
                    RUNS;
            std::cout << "sse_8816: " << sse_used_8816 << " ms, "
                      << computations / sse_used_8816 << " Gflops, ";
        }
        std::cout << std::endl;
    };
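    // One fixed square case, then a sweep over M/K/N combinations.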
    run(256, 256, 256);
    for (size_t M : {8, 64, 112, 256, 512}) {
        for (size_t K : {8, 16, 32, 64, 112, 256, 512}) {
            for (size_t N : {8, 64, 112, 256, 512}) {
                run(M, N, K);
            }
        }
    }
}
#endif  // MEGDNN_WITH_BENCHMARK

// vim: syntax=cpp.doxygen
