
dnn/test/cuda/relayout_format.cpp (12 kB)

/**
 * \file dnn/test/cuda/relayout_format.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/rng.h"
#include "test/cuda/fixture.h"

using namespace megdnn;
using namespace test;

TEST_F(CUDA, RELAYOUT_FORMAT) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{-50, 50};
    param::RelayoutFormat param;

    // NCHW4 <-> CHWN4 conversions
    param.mode = param::RelayoutFormat::Mode::NCHW4_CHWN4;
    checker.set_dtype(0, dtype::QuantizedS8{0.1f})
            .set_dtype(1, dtype::QuantizedS8{0.1f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{22, 23, 24, 25, 4}, {}});

    param.mode = param::RelayoutFormat::Mode::CHWN4_NCHW4;
    checker.execs({{22, 23, 24, 25, 4}, {}});
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW4_NCHW) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{-50, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW4_NCHW;

    checker.set_dtype(0, dtype::QuantizedS8{0.1f})
            .set_dtype(1, dtype::QuantizedS8{0.1f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{1, 1, 2, 2, 4}, {}});
    checker.set_dtype(0, dtype::QuantizedS8{0.1f})
            .set_dtype(1, dtype::QuantizedS8{0.1f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{22, 23, 24, 25, 4}, {}});

    param.oc = 90;
    checker.set_dtype(0, dtype::QuantizedS8{0.1f})
            .set_dtype(1, dtype::QuantizedS8{0.1f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{22, 23, 24, 25, 4}, {}});

    param.oc = 16;
    param.group = 8;
    checker.set_dtype(0, dtype::QuantizedS8{0.1f})
            .set_dtype(1, dtype::QuantizedS8{0.1f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{11, 16, 22, 33, 4}, {}});
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{-50, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
    for (size_t n : {1, 3}) {
        for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
            for (size_t h : {3, 7, 12, 16, 22, 59, 83}) {
                for (size_t w : {3, 22, 63, 128, 256}) {
                    checker.set_dtype(0, dtype::QuantizedS8{1.f})
                            .set_dtype(1, dtype::QuantizedS8{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                    checker.set_dtype(0, dtype::QuantizedS8{1.f})
                            .set_dtype(1, dtype::QuantizedS8{2.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                    checker.set_dtype(0, dtype::QuantizedS32{1.f})
                            .set_dtype(1, dtype::QuantizedS32{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                }
            }
        }
    }

    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{8, 3, 224, 224}, {}});
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{8, 3, 600, 600}, {}});
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{1, 6, 768, 1280}, {}});

    param.group = 2;
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{8, 6, 300, 300}, {}});

    param.group = 3;
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{8, 6, 300, 300}, {}});
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_WEIGHT) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{-50, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_WEIGHT;
    for (size_t oc : {1, 3, 4, 16, 33}) {
        for (size_t ic : {1, 2, 3, 4, 8, 9, 11, 16, 33}) {
            for (size_t h : {3, 5, 7}) {
                for (size_t w : {3, 5, 7}) {
                    checker.set_dtype(0, dtype::QuantizedS8{1.f})
                            .set_dtype(1, dtype::QuantizedS8{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{oc, ic, h, w}, {}});
                }
            }
        }
    }

    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{13, 13, 5, 5}, {}});
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{4, 16, 16, 3, 3}, {}});
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{4, 13, 11, 3, 3}, {}});
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_DEFAULT) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{0, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
    for (size_t n : {1, 3}) {
        for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
            for (size_t h : {3, 7, 12, 16, 59, 83}) {
                for (size_t w : {3, 63, 128, 256}) {
                    checker.set_dtype(0, dtype::Quantized8Asymm{1.f, 128})
                            .set_dtype(1, dtype::QuantizedS8{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                }
            }
        }
    }
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_U8) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{0, 255};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
    for (size_t n : {1, 3}) {
        for (size_t c : {1, 2, 3, 4, 8, 9, 11, 16}) {
            for (size_t h : {3, 7, 12, 16, 59, 83}) {
                for (size_t w : {3, 13, 3 * 4, 63 * 4, 128 * 4, 256 * 4}) {
                    checker.set_dtype(0, dtype::Uint8())
                            .set_dtype(1, dtype::QuantizedS8{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                    checker.set_dtype(0, dtype::Quantized8Asymm{1.f, 128})
                            .set_dtype(1, dtype::QuantizedS8{1.f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                    checker.set_dtype(0, dtype::Uint8())
                            .set_dtype(1, dtype::QuantizedS8{2.5f})
                            .set_rng(0, &rng)
                            .set_param(param)
                            .execs({{n, c, h, w}, {}});
                }
            }
        }
    }
}

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW_NCHW4_IC_SMALL) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{0, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
    checker.set_dtype(0, dtype::QuantizedS8{1.f})
            .set_dtype(1, dtype::QuantizedS8{1.f})
            .set_rng(0, &rng)
            .set_param(param)
            .execs({{8, 3, 768, 1280}, {}});
}

#if MEGDNN_WITH_BENCHMARK
TEST_F(CUDA, BENCHMARK_RELAYOUT_FORMAT) {
    using Param = RelayoutFormat::Param;
    auto run = [&](const TensorShapeArray& shapes, Param param,
                   Param default_param) {
        Benchmarker<RelayoutFormat> benchmarker(handle_cuda());
        benchmarker.set_param(param);
        benchmarker.set_dtype(0, dtype::QuantizedS8{1.f})
                .set_dtype(1, dtype::QuantizedS8{1.f});

        Benchmarker<RelayoutFormat> benchmarker_default(handle_cuda());
        benchmarker_default.set_param(default_param);
        benchmarker_default.set_dtype(0, dtype::QuantizedS8{1.f})
                .set_dtype(1, dtype::QuantizedS8{1.f});

        for (auto&& shape : shapes) {
            // int8 bytes read (input) plus bytes written (output, with the
            // channel dim padded to a multiple of 4), scaled to MB so that
            // dividing by the time in ms yields GB/s
            double memaccess = (double(shape.total_nr_elems()) +
                                double(shape[0]) * ((shape[1] + 3) / 4 * 4) *
                                        shape[2] * shape[3]) *
                               1e-6;
            auto time_ms = benchmarker.execs({shape, {}});
            if (shape[1] <= 4) {
                auto time_default_ms = benchmarker_default.execs({shape, {}});
                printf("execute %s, time %.4f ms, %.4f GB/s, default %.4f "
                       "GB/s\n",
                       shape.to_string().c_str(), time_ms, memaccess / time_ms,
                       memaccess / time_default_ms);
            } else {
                printf("execute %s, time %.4f ms, %.4f GB/s\n",
                       shape.to_string().c_str(), time_ms, memaccess / time_ms);
            }
        }
    };

    TensorShapeArray shapes = {
            {8, 1, 768, 1280}, {8, 3, 768, 1280}, {8, 3, 224, 224},
            {8, 4, 768, 1280}, {64, 3, 768, 1280},
    };
    {
        Param param;
        param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4;
        Param default_param;
        default_param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
        run(shapes, param, default_param);
    }
}
#endif

TEST_F(CUDA, RELAYOUT_FORMAT_NCHW4) {
    Checker<RelayoutFormat> checker(handle_cuda());
    UniformIntRNG rng{-50, 50};
    param::RelayoutFormat param;
    param.mode = param::RelayoutFormat::Mode::NCHW_NCHW4_IC_SMALL;
    for (DType dtype :
         std::vector<DType>({dtype::QuantizedS8{0.1f}, dtype::Float32{}})) {
        checker.set_dtype(0, dtype).set_dtype(1, dtype).set_rng(0, &rng);
        checker.set_param(param).execs({{2, 4, 35, 36}, {}});
        checker.set_param(param).execs({{2, 3, 35, 36}, {}});
        checker.set_param(param).execs({{2, 1, 35, 36}, {}});

        param.mode = param::RelayoutFormat::Mode::
                NCHW_NCHW4_IC_SMALL_CONV_DENSE_WEIGHT;
        checker.set_param(param).execs({{4, 3, 3, 3}, {}});
        checker.set_param(param).execs({{4, 4, 3, 3}, {}});
        checker.set_param(param).execs({{1, 4, 3, 3}, {}});
    }
}

// vim: syntax=cpp.doxygen
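
Reference note (not part of the test file above): the NCHW_NCHW4 mode exercised by most of these tests packs the channel axis into blocks of four that become the innermost dimension, with C rounded up to a multiple of 4. A minimal host-side sketch of that index mapping follows; the helper name relayout_nchw_to_nchw4 and the zero-filled padding channels are illustrative assumptions, not MegDNN API.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative reference for the NCHW -> NCHW4 relayout tested above:
// src holds N x C x H x W int8 data; dst holds N x ceil(C/4) x H x W x 4,
// with channels beyond C left as zero in this sketch.
std::vector<int8_t> relayout_nchw_to_nchw4(
        const std::vector<int8_t>& src, size_t N, size_t C, size_t H, size_t W) {
    const size_t C4 = (C + 3) / 4;  // number of 4-channel blocks
    std::vector<int8_t> dst(N * C4 * H * W * 4, 0);
    for (size_t n = 0; n < N; ++n)
        for (size_t c = 0; c < C; ++c)
            for (size_t h = 0; h < H; ++h)
                for (size_t w = 0; w < W; ++w) {
                    const size_t src_idx = ((n * C + c) * H + h) * W + w;
                    const size_t dst_idx =
                            (((n * C4 + c / 4) * H + h) * W + w) * 4 + c % 4;
                    dst[dst_idx] = src[src_idx];
                }
    return dst;
}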

The MegEngine installation package already bundles the CUDA environment needed to run code on GPU, so there is no separate CPU or GPU build to choose from. To run GPU programs, make sure the machine has a GPU and its driver installed. If you would like to try deep learning development on cloud GPU compute, you are welcome to visit the MegStudio platform.