You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

relayout_format.cpp 16 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433
  1. /**
  2. * \file dnn/test/cuda/relayout_format.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
  10. * implied.
  11. */
  12. #include "megdnn/dtype.h"
  13. #include "megdnn/oprs.h"
  14. #include "test/cuda/benchmark.h"
  15. #include "test/common/checker.h"
  16. #include "test/common/rng.h"
  17. #include "test/cuda/fixture.h"
  18. using namespace megdnn;
  19. using namespace test;
  20. #define MEGDNN_WITH_BENCHMARK 1
  21. TEST_F(CUDA, RELAYOUT_FORMAT) {
  22. Checker<RelayoutFormat> checker(handle_cuda());
  23. UniformIntRNG rng{-50, 50};
  24. param::RelayoutFormat param;
  25. param.mode = param::RelayoutFormat::Mode::NCHW4_CHWN4;
  26. checker.set_dtype(0, dtype::QuantizedS8{0.1f})
  27. .set_dtype(1, dtype::QuantizedS8{0.1f})
  28. .set_rng(0, &rng)
  29. .set_param(param)
  30. .execs({{22, 23, 24, 25, 4}, {}});
  31. param.mode = param::RelayoutFormat::Mode::CHWN4_NCHW4;
  32. checker.execs({{22, 23, 24, 25, 4}, {}});
  33. }
  34. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW4_NCHW) {
  35. Checker<RelayoutFormat> checker(handle_cuda());
  36. UniformIntRNG rng{-50, 50};
  37. param::RelayoutFormat param;
  38. param.mode = param::RelayoutFormat::Mode::NCHW4_NCHW;
  39. checker.set_dtype(0, dtype::QuantizedS8{0.1f})
  40. .set_dtype(1, dtype::QuantizedS8{0.1f})
  41. .set_rng(0, &rng)
  42. .set_param(param)
  43. .execs({{1, 1, 2, 2, 4}, {}});
  44. checker.set_dtype(0, dtype::QuantizedS8{0.1f})
  45. .set_dtype(1, dtype::QuantizedS8{0.1f})
  46. .set_rng(0, &rng)
  47. .set_param(param)
  48. .execs({{22, 23, 24, 25, 4}, {}});
  49. param.oc = 90;
  50. checker.set_dtype(0, dtype::QuantizedS8{0.1f})
  51. .set_dtype(1, dtype::QuantizedS8{0.1f})
  52. .set_rng(0, &rng)
  53. .set_param(param)
  54. .execs({{22, 23, 24, 25, 4}, {}});
  55. param.oc = 16;
  56. param.group = 8;
  57. checker.set_dtype(0, dtype::QuantizedS8{0.1f})
  58. .set_dtype(1, dtype::QuantizedS8{0.1f})
  59. .set_rng(0, &rng)
  60. .set_param(param)
  61. .execs({{11, 16, 22, 33, 4}, {}});
  62. }
  63. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4) {
  64. Checker<RelayoutFormat> checker(handle_cuda());
  65. UniformIntRNG rng{-50, 50};
  66. param::RelayoutFormat param;
  67. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
  68. for (size_t n : {1, 3}) {
  69. for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
  70. for (size_t h : {3, 7, 12, 16, 22, 59, 83}) {
  71. for (size_t w : {3, 22, 63, 128, 256}) {
  72. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  73. .set_dtype(1, dtype::QuantizedS8{1.f})
  74. .set_rng(0, &rng)
  75. .set_param(param)
  76. .execs({{n, c, h, w}, {}});
  77. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  78. .set_dtype(1, dtype::QuantizedS8{2.f})
  79. .set_rng(0, &rng)
  80. .set_param(param)
  81. .execs({{n, c, h, w}, {}});
  82. checker.set_dtype(0, dtype::QuantizedS32{1.f})
  83. .set_dtype(1, dtype::QuantizedS32{1.f})
  84. .set_rng(0, &rng)
  85. .set_param(param)
  86. .execs({{n, c, h, w}, {}});
  87. }
  88. }
  89. }
  90. }
  91. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  92. .set_dtype(1, dtype::QuantizedS8{1.f})
  93. .set_rng(0, &rng)
  94. .set_param(param)
  95. .execs({{8, 3, 224, 224}, {}});
  96. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  97. .set_dtype(1, dtype::QuantizedS8{1.f})
  98. .set_rng(0, &rng)
  99. .set_param(param)
  100. .execs({{8, 3, 600, 600}, {}});
  101. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  102. .set_dtype(1, dtype::QuantizedS8{1.f})
  103. .set_rng(0, &rng)
  104. .set_param(param)
  105. .execs({{1, 6, 768, 1280}, {}});
  106. param.group = 2;
  107. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  108. .set_dtype(1, dtype::QuantizedS8{1.f})
  109. .set_rng(0, &rng)
  110. .set_param(param)
  111. .execs({{8, 6, 300, 300}, {}});
  112. param.group = 3;
  113. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  114. .set_dtype(1, dtype::QuantizedS8{1.f})
  115. .set_rng(0, &rng)
  116. .set_param(param)
  117. .execs({{8, 6, 300, 300}, {}});
  118. }
  119. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_WEIGHT) {
  120. Checker<RelayoutFormat> checker(handle_cuda());
  121. UniformIntRNG rng{-50, 50};
  122. param::RelayoutFormat param;
  123. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_WEIGHT;
  124. for (size_t oc : {1, 3, 4, 16, 33}) {
  125. for (size_t ic : {1, 2, 3, 4, 8, 9, 11, 16, 33}) {
  126. for (size_t h : {3, 5, 7}) {
  127. for (size_t w : {3, 5, 7}) {
  128. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  129. .set_dtype(1, dtype::QuantizedS8{1.f})
  130. .set_rng(0, &rng)
  131. .set_param(param)
  132. .execs({{oc, ic, h, w}, {}});
  133. }
  134. }
  135. }
  136. }
  137. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  138. .set_dtype(1, dtype::QuantizedS8{1.f})
  139. .set_rng(0, &rng)
  140. .set_param(param)
  141. .execs({{13, 13, 5, 5}, {}});
  142. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  143. .set_dtype(1, dtype::QuantizedS8{1.f})
  144. .set_rng(0, &rng)
  145. .set_param(param)
  146. .execs({{4, 16, 16, 3, 3}, {}});
  147. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  148. .set_dtype(1, dtype::QuantizedS8{1.f})
  149. .set_rng(0, &rng)
  150. .set_param(param)
  151. .execs({{4, 13, 11, 3, 3}, {}});
  152. }
  153. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_DEFAULT) {
  154. Checker<RelayoutFormat> checker(handle_cuda());
  155. UniformIntRNG rng{0, 50};
  156. param::RelayoutFormat param;
  157. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
  158. for (size_t n : {1, 3}) {
  159. for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
  160. for (size_t h : {3, 7, 12, 16, 59, 83}) {
  161. for (size_t w : {3, 63, 128, 256}) {
  162. checker.set_dtype(0, dtype::Quantized8Asymm{1.f, 128})
  163. .set_dtype(1, dtype::QuantizedS8{1.f})
  164. .set_rng(0, &rng)
  165. .set_param(param)
  166. .execs({{n, c, h, w}, {}});
  167. }
  168. }
  169. }
  170. }
  171. }
  172. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_U8) {
  173. Checker<RelayoutFormat> checker(handle_cuda());
  174. UniformIntRNG rng{0, 255};
  175. param::RelayoutFormat param;
  176. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
  177. for (size_t n : {1, 3}) {
  178. for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
  179. for (size_t h : {3, 7, 12, 16, 59, 83}) {
  180. for (size_t w : {3, 13, 3 * 4, 63 * 4, 128 * 4, 256 * 4}) {
  181. checker.set_dtype(0, dtype::Uint8())
  182. .set_dtype(1, dtype::QuantizedS8{1.f})
  183. .set_rng(0, &rng)
  184. .set_param(param)
  185. .execs({{n, c, h, w}, {}});
  186. checker.set_dtype(0, dtype::Quantized8Asymm{1.f, 128})
  187. .set_dtype(1, dtype::QuantizedS8{1.f})
  188. .set_rng(0, &rng)
  189. .set_param(param)
  190. .execs({{n, c, h, w}, {}});
  191. checker.set_dtype(0, dtype::Uint8())
  192. .set_dtype(1, dtype::QuantizedS8{2.5f})
  193. .set_rng(0, &rng)
  194. .set_param(param)
  195. .execs({{n, c, h, w}, {}});
  196. }
  197. }
  198. }
  199. }
  200. }
  201. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_IC_SMALL) {
  202. Checker<RelayoutFormat> checker(handle_cuda());
  203. UniformIntRNG rng{0, 50};
  204. param::RelayoutFormat param;
  205. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
  206. checker.set_dtype(0, dtype::QuantizedS8{1.f})
  207. .set_dtype(1, dtype::QuantizedS8{1.f})
  208. .set_rng(0, &rng)
  209. .set_param(param)
  210. .execs({{8, 3, 768, 1280}, {}});
  211. }
  212. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW64) {
  213. Checker<RelayoutFormat> checker(handle_cuda());
  214. UniformIntRNG s4{-8, 7};
  215. UniformIntRNG u4{0, 15};
  216. param::RelayoutFormat param;
  217. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW64;
  218. for (size_t n : {1, 3}) {
  219. for (size_t c : {64, 128}) {
  220. for (size_t h : {7, 14, 16, 28}) {
  221. for (size_t w : {2, 3, 7, 8, 16, 31}) {
  222. checker.set_dtype(0, dtype::QuantizedS4{2.f})
  223. .set_dtype(1, dtype::QuantizedS4{2.f})
  224. .set_rng(0, &s4)
  225. .set_param(param)
  226. .execs({{n, c, h, w}, {}});
  227. checker.set_dtype(0, dtype::Quantized4Asymm{1.2f, 8})
  228. .set_dtype(1, dtype::Quantized4Asymm{1.2f, 4})
  229. .set_rng(0, &u4)
  230. .set_param(param)
  231. .execs({{n, c, h, w}, {}});
  232. checker.set_dtype(0, dtype::QuantizedS4{1.19990307f})
  233. .set_dtype(1, dtype::QuantizedS4{1.f})
  234. .set_rng(0, &s4)
  235. .set_param(param)
  236. .execs({{n, c, h, w}, {}});
  237. checker.set_dtype(0, dtype::Quantized4Asymm{1.19990307f, 8})
  238. .set_dtype(1, dtype::Quantized4Asymm{1.f, 4})
  239. .set_rng(0, &u4)
  240. .set_param(param)
  241. .set_epsilon(1e-3)
  242. .execs({{n, c, h, w}, {}});
  243. }
  244. }
  245. }
  246. }
  247. }
  248. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW64_NCHW) {
  249. Checker<RelayoutFormat> checker(handle_cuda());
  250. UniformIntRNG s4{-8, 7};
  251. UniformIntRNG u4{0, 15};
  252. param::RelayoutFormat param;
  253. param.mode = param::RelayoutFormat::Mode::NCHW64_NCHW;
  254. for (size_t n : {1, 3}) {
  255. for (size_t c : {64, 128}) {
  256. for (size_t h : {7, 14, 16, 28}) {
  257. for (size_t w : {2, 3, 4, 7, 14, 16, 17}) {
  258. checker.set_dtype(0, dtype::QuantizedS4{2.f})
  259. .set_dtype(1, dtype::QuantizedS4{2.f})
  260. .set_rng(0, &s4)
  261. .set_param(param)
  262. .set_epsilon(1e-3)
  263. .execs({{n, c / 64, h, w, 64}, {}});
  264. checker.set_dtype(0, dtype::Quantized4Asymm{1.2f, 4})
  265. .set_dtype(1, dtype::Quantized4Asymm{1.2f, 8})
  266. .set_rng(0, &u4)
  267. .set_param(param)
  268. .set_epsilon(1e-3)
  269. .execs({{n, c / 64, h, w, 64}, {}});
  270. checker.set_dtype(0, dtype::QuantizedS4{1.19990307f})
  271. .set_dtype(1, dtype::QuantizedS4{1.f})
  272. .set_rng(0, &s4)
  273. .set_param(param)
  274. .set_epsilon(1e-3)
  275. .execs({{n, c / 64, h, w, 64}, {}});
  276. checker.set_dtype(0, dtype::Quantized4Asymm{1.20211209f, 8})
  277. .set_dtype(1, dtype::Quantized4Asymm{1.f, 4})
  278. .set_rng(0, &u4)
  279. .set_param(param)
  280. .set_epsilon(1e-3)
  281. .execs({{n, c / 64, h, w, 64}, {}});
  282. }
  283. }
  284. }
  285. }
  286. }
  287. #if MEGDNN_WITH_BENCHMARK
/// Benchmark NCHW -> NCHW4 against the NCHW_NCHW4_IC_SMALL fallback and
/// report effective memory bandwidth for each shape.
TEST_F(CUDA, BENCHMARK_RELAYOUT_FORMAT) {
    using Param = RelayoutFormat::Param;
    auto run = [&](const TensorShapeArray& shapes, Param param,
                   Param default_param) {
        Benchmarker<RelayoutFormat> benchmarker(handle_cuda());
        benchmarker.set_param(param);
        benchmarker.set_dtype(0, dtype::QuantizedS8{1.f})
                .set_dtype(1, dtype::QuantizedS8{1.f});
        // Second benchmarker runs the same shapes with `default_param`
        // for a side-by-side bandwidth comparison.
        Benchmarker<RelayoutFormat> benchmarker_default(handle_cuda());
        benchmarker_default.set_param(default_param);
        benchmarker_default.set_dtype(0, dtype::QuantizedS8{1.f})
                .set_dtype(1, dtype::QuantizedS8{1.f});
        for (auto&& shape : shapes) {
            // Bytes moved (int8 => 1 byte/elem): input elements plus output
            // elements with the channel dim padded up to a multiple of 4.
            // Scaled by 1e-6 => MB, so MB / ms below yields GB/s.
            double memaccess = (double(shape.total_nr_elems()) +
                                double(shape[0]) * ((shape[1] + 3) / 4 * 4) *
                                        shape[2] * shape[3]) *
                               1e-6;
            auto time_ms = benchmarker.execs({shape, {}});
            // The IC_SMALL fallback only applies when channels <= 4, so the
            // comparison column is printed only for those shapes.
            if (shape[1] <= 4) {
                auto time_default_ms = benchmarker_default.execs({shape, {}});
                printf("execute %s, time %.4f ms, %.4f GB/s, default %.4f "
                       "GB/s\n",
                       shape.to_string().c_str(), time_ms, memaccess / time_ms,
                       memaccess / time_default_ms);
            } else {
                printf("execute %s, time %.4f ms, %.4f GB/s\n",
                       shape.to_string().c_str(), time_ms, memaccess / time_ms);
            }
        }
    };
    TensorShapeArray shapes = {
            {8, 1, 768, 1280}, {8, 3, 768, 1280}, {8, 3, 224, 224},
            {8, 4, 768, 1280}, {64, 3, 768, 1280},
    };
    {
        Param param;
        param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
        Param default_param;
        default_param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
        run(shapes, param, default_param);
    }
}
/// Benchmark 4-bit relayout in both directions (NCHW <-> NCHW64) and
/// report effective memory bandwidth for each shape.
TEST_F(CUDA, BENCHMARK_RELAYOUT_FORMAT_QS4) {
    using Param = RelayoutFormat::Param;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        CUBenchmarker<RelayoutFormat> benchmarker(handle_cuda());
        benchmarker.set_param(param);
        // Distinct src/dst scales force the rescaling code path.
        benchmarker.set_dtype(0, dtype::QuantizedS4{1.19990307f})
                .set_dtype(1, dtype::QuantizedS4{1.20210322f});
        for (auto&& shape : shapes) {
            // Element count scaled by 1e-6 (MB-like units), so the value
            // divided by milliseconds prints as GB/s.
            // NOTE(review): unlike the int8 benchmark above, this counts only
            // input elements and ignores 4-bit packing — the printed GB/s is a
            // relative figure, not exact bus traffic.
            double memaccess = double(shape.total_nr_elems()) * 1e-6;
            auto time_ms = benchmarker.execs({shape, {}});
            printf("execute %s, time %.4f ms, %.4f GB/s\n",
                   shape.to_string().c_str(), time_ms, memaccess / time_ms);
        }
    };
    {
        // Forward direction: plain NCHW inputs.
        TensorShapeArray shapes = {
                {1, 64, 56, 56}, {16, 64, 56, 56}, {64, 64, 56, 56},
                {1, 64, 56, 55}, {16, 64, 56, 55}, {64, 64, 56, 55},
        };
        Param param;
        param.mode = param::RelayoutFormat::Mode::NCHW_NCHW64;
        run(shapes, param);
    }
    {
        // Reverse direction: inputs already packed as (n, c/64, h, w, 64).
        TensorShapeArray shapes = {
                {64, 1, 56, 56, 64},
                {1, 32, 7, 7, 64},
                {16, 32, 7, 7, 64},
                {64, 32, 7, 7, 64},
        };
        Param param;
        param.mode = param::RelayoutFormat::Mode::NCHW64_NCHW;
        run(shapes, param);
    }
}
  365. #endif
  366. TEST_F(CUDA, RELAYOUT_FORMAT_NCHW4) {
  367. Checker<RelayoutFormat> checker(handle_cuda());
  368. UniformIntRNG rng{-50, 50};
  369. param::RelayoutFormat param;
  370. param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
  371. for (DType dtype :
  372. std::vector<DType>({dtype::QuantizedS8{0.1f}, dtype::Float32{}})) {
  373. checker.set_dtype(0, dtype).set_dtype(1, dtype).set_rng(0, &rng);
  374. checker.set_param(param).execs({{2, 4, 35, 36}, {}});
  375. checker.set_param(param).execs({{2, 3, 35, 36}, {}});
  376. checker.set_param(param).execs({{2, 1, 35, 36}, {}});
  377. param.mode = param::RelayoutFormat::Mode::
  378. NCHW_NCHW4_IC_SMALL_CONV_DENSE_WEIGHT;
  379. checker.set_param(param).execs({{4, 3, 3, 3}, {}});
  380. checker.set_param(param).execs({{4, 4, 3, 3}, {}});
  381. checker.set_param(param).execs({{1, 4, 3, 3}, {}});
  382. }
  383. }
  384. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台