
pooling.cpp

/**
 * \file dnn/test/arm_common/pooling.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "test/arm_common/fixture.h"

#include "test/common/pooling.h"
#include "test/common/checker.h"
#include "test/common/benchmarker.h"
#include "test/common/rng.h"

namespace megdnn {
namespace test {

TEST_F(ARM_COMMON, POOLING)
{
    using Param = param::Pooling;
    // clang-format off
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
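//! NOTE (editor): the guard above is the standard window-fit condition. For a
//! pooling window FH x FW with stride SH x SW and padding PH x PW, the output
//! extent is OH = (IH + 2 * PH - FH) / SH + 1, which is only well defined
//! when IH + 2 * PH >= FH; with FH = FW = 5 that is the `ih + p * 2 >= 5`
//! check. The same condition recurs in the tests below.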
TEST_F(ARM_COMMON, POOLING_INT8_W2x2_S2x2)
{
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON, POOLING_INT8_W3x3_S2x2)
{
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
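//! NOTE (editor): the FP16 tests below are compiled only when the toolchain
//! defines the ACLE macro __ARM_FEATURE_FP16_VECTOR_ARITHMETIC, i.e. when
//! half-precision vector arithmetic is available (e.g. -march=armv8.2-a+fp16).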
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON, POOLING_FP16) {
    Checker<Pooling> checker(handle());
    checker.set_dtype(0, dtype::Float16{})
            .set_dtype(1, dtype::Float16{})
            .set_epsilon(3e-3);
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
            for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                for (size_t window : {2, 3}) {
                    Param param;
                    param.mode = mode;
                    param.window_h = param.window_w = window;
                    param.stride_h = param.stride_w = 1;
                    param.pad_h = param.pad_w = window / 2;
                    //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    //! test for SH == SW == 2 && FH == FW == 2
                    param.stride_h = param.stride_w = 2;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
            }

    //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1, 2})
                for (size_t pw : {0, 1, 2})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 3;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }

    //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }

    //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
    for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
#endif
TEST_F(ARM_COMMON, POOLING_QUANTIZED) {
    Checker<Pooling> checker(handle());
    UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng2{0, UINT8_MAX >> 1};
    using Param = param::Pooling;
    for (auto type : std::vector<DType>{
                 dtype::QuantizedS8(1.1f),
                 dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
        if (type.enumv() == DTypeEnum::QuantizedS8) {
            checker.set_rng(0, &rng1);
        } else {
            megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
            checker.set_rng(0, &rng2);
        }
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
                for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                    for (size_t window : {2, 3}) {
                        Param param;
                        param.mode = mode;
                        param.window_h = param.window_w = window;
                        param.stride_h = param.stride_w = 1;
                        param.pad_h = param.pad_w = window / 2;
                        //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                        //! test for SH == SW == 2 && FH == FW == 2
                        param.stride_h = param.stride_w = 2;
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    }
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
        for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
            for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
                for (size_t ph : {0, 1, 2})
                    for (size_t pw : {0, 1, 2})
                        if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                            param::Pooling param;
                            param.mode = param::Pooling::Mode::MAX;
                            param.pad_h = ph;
                            param.pad_w = pw;
                            param.window_h = param.window_w = 3;
                            param.stride_h = param.stride_w = 2;
                            checker.set_param(param).exec(
                                    TensorShapeArray{{2, 3, ih, iw}, {}});
                        }

        //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 4;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
        for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 5;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
    }
}
#if MEGDNN_WITH_BENCHMARK
void benchmark_nchw44_fp32(Handle* handle) {
    using Param = param::Pooling;
    auto run = [&](size_t n, size_t c, size_t h, size_t w, size_t filter,
                   size_t stride, size_t pad, Param::Mode mode) {
        Param param;
        param.window_h = param.window_w = filter;
        param.stride_h = param.stride_w = stride;
        param.pad_h = param.pad_w = pad;
        param.format = Param::Format::NCHW;
        param.mode = mode;
        TensorShape nchw_shape = {n, c, h, w};
        TensorShape nchw44_shape = {n, c / 4, h, w, 4};
        TensorLayout dst_layout;
        auto opr = handle->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({nchw_shape, dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
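        //! NOTE (editor): each timing below is total milliseconds over RUN
        //! iterations, so tN / RUN is ms per run, and calc_amount / (ms * 1000)
        //! is ops per microsecond, i.e. the Mflops figures in the printf.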
        Benchmarker<Pooling> benchmarker_float_nchw(handle);
        Benchmarker<Pooling> benchmarker_float_nchw44(handle);
        Benchmarker<Pooling> benchmarker_int_nchw44(handle);
        size_t RUN = 500;
        auto t1 = benchmarker_float_nchw.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw_shape, {}});
        param.format = Param::Format::NCHW44;
        auto t2 = benchmarker_int_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl({{nchw44_shape, dtype::QuantizedS8(1.0)},
                                  {{}, dtype::QuantizedS8(1.0)}});
        auto t3 = benchmarker_float_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw44_shape, {}});
        printf("{%zu %zu %zu %zu} filter = %zu, stride = %zu pad = %zu\n"
               "nchw_fp32={%.3f ms, %.3f Mflops}, "
               "nchw44_int={%.3f ms, %.3f Mflops}, "
               "nchw44_fp32={%.3f ms, %.3f Mflops, speed_up %f}\n\n",
               n, c, h, w, filter, stride, pad, t1 / RUN,
               calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000), t3 / RUN,
               calc_amount / (t3 / RUN * 1000), t1 / t3);
    };
    // Resnet50
    run(1, 64, 112, 112, 3, 2, 1, param::Pooling::Mode::MAX);
    run(1, 2048, 7, 7, 7, 1, 0, param::Pooling::Mode::AVERAGE);
    // VGG16
    run(1, 64, 224, 224, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 128, 112, 112, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 256, 56, 56, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 28, 28, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 14, 14, 2, 2, 0, param::Pooling::Mode::MAX);
}

TEST_F(ARM_COMMON, BENCHMARK_POOLING_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}
TEST_F(ARM_COMMON_MULTI_THREADS, BENCHMARK_POOLING_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_INT8_W3x3_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Int8());
        layouts.emplace_back(shapes[1], dtype::Int8());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_int(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        auto t3 = benchmarker_int.set_display(false).set_times(RUN)
                .set_param(param).execl(layouts);
        printf("naive=%.3fms float=%.3fms, int=%.3fms\n",
               t1 / RUN, t2 / RUN, t3 / RUN);
        auto speedup = t2 / t3;
        ASSERT_GE(speedup, 2.0);
    };
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "3x3 with 2x2 stride max pooling:" << std::endl;
    run({{1, 3, 640, 480}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W4x4_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount = dst_layout.total_nr_elems() *
                            param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000),
               t2 / RUN, calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 4;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "4x4 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W5x5_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                .set_param(param).exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount = dst_layout.total_nr_elems() *
                            param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000),
               t2 / RUN, calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 5;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "5x5 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_FP16) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Float16());
        layouts.emplace_back(shapes[1], dtype::Float16());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_half(handle());
        size_t RUN = 10;
        auto tf = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes) /
                  RUN;
        auto th = benchmarker_half.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl(layouts) /
                  RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("float=%.3fms %f gflops, float16=%.3fms %f gflops speedup: %f\n",
               tf, computations / tf * 1e3, th, computations / th * 1e3,
               tf / th);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);

    for (size_t oh : {640, 128})
        for (size_t ow : {480, 112}) {
            param.window_h = param.window_w = 3;
            param.stride_h = param.stride_w = 2;
            param.pad_h = param.pad_w = 1;
            param.mode = Param::Mode::AVERAGE;
            printf("3x3 with 2x2 stride average pooling.\n");
            run({{1, 3, oh, ow}, {}}, param);
            for (size_t pw : {2, 3, 4, 5}) {
                param.window_h = param.window_w = pw;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = 1;
                param.mode = Param::Mode::MAX;
                printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
                run({{1, 3, oh, ow}, {}}, param);
            }
        }
}
TEST_F(ARM_COMMON, BENCHMARK_POOLING_QUANTIZED) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::QuantizedS8(1.1f));
        layouts.emplace_back(shapes[1], dtype::QuantizedS8(1.1f));
        Benchmarker<Pooling> benchmarker_int(handle());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        size_t RUN = 10;
        auto time_int = benchmarker_int.set_display(false)
                                .set_times(RUN)
                                .set_param(param)
                                .exec(shapes) /
                        RUN;
        auto time_naive = benchmarker_naive.set_display(false)
                                  .set_times(RUN)
                                  .set_param(param)
                                  .execl(layouts) /
                          RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::QuantizedS8(1.1f)}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("naive=%.3fms %f gflops, int8=%.3fms %f gflops speedup: %f\n",
               time_naive, computations / time_naive * 1e3, time_int,
               computations / time_int * 1e3, time_naive / time_int);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);
    // clang-format off
    for (size_t oh : {640, 128})
    for (size_t ow : {480, 112})
    for (size_t pw : {2, 3, 4, 5}) {
        param.window_h = param.window_w = pw;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = 1;
        printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
        run({{1, 3, oh, ow}, {}}, param);
    }
    // clang-format on
}
#endif

} // namespace test
} // namespace megdnn

// vim: syntax=cpp.doxygen
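
For readers who want to see the reference semantics that the Checker-based tests compare the NEON kernels against, here is a minimal, self-contained sketch of naive NCHW max pooling. It is an editor's illustration, not MegDNN code: the helper name max_pool_nchw and its signature are invented for this example. It implements the shape rule OH = (IH + 2*PH - FH) / SH + 1 and skips padded positions, which is exactly why the window-fit guards appear throughout the tests above.

// Editor's illustration (not MegDNN code): naive NCHW max pooling.
#include <cstdio>
#include <limits>
#include <vector>

std::vector<float> max_pool_nchw(const std::vector<float>& src, size_t N,
                                 size_t C, size_t IH, size_t IW, size_t FH,
                                 size_t FW, size_t SH, size_t SW, size_t PH,
                                 size_t PW) {
    // Window-fit precondition (the `ih + 2 * ph >= window` guard in the
    // tests): OH = (IH + 2*PH - FH) / SH + 1 requires IH + 2*PH >= FH.
    const size_t OH = (IH + 2 * PH - FH) / SH + 1;
    const size_t OW = (IW + 2 * PW - FW) / SW + 1;
    std::vector<float> dst(N * C * OH * OW);
    for (size_t n = 0; n < N; ++n)
        for (size_t c = 0; c < C; ++c)
            for (size_t oh = 0; oh < OH; ++oh)
                for (size_t ow = 0; ow < OW; ++ow) {
                    float best = std::numeric_limits<float>::lowest();
                    for (size_t fh = 0; fh < FH; ++fh)
                        for (size_t fw = 0; fw < FW; ++fw) {
                            // map the window element back onto the unpadded input
                            int ih = int(oh * SH + fh) - int(PH);
                            int iw = int(ow * SW + fw) - int(PW);
                            if (ih < 0 || iw < 0 || ih >= int(IH) ||
                                iw >= int(IW))
                                continue;  // padding never contributes to MAX
                            float v = src[((n * C + c) * IH + size_t(ih)) * IW +
                                          size_t(iw)];
                            if (v > best)
                                best = v;
                        }
                    dst[((n * C + c) * OH + oh) * OW + ow] = best;
                }
    return dst;
}

int main() {
    // 1x1x4x4 ramp input, 3x3 window, stride 2, pad 1 -> 2x2 output
    std::vector<float> src(16);
    for (size_t i = 0; i < 16; ++i)
        src[i] = float(i);
    auto dst = max_pool_nchw(src, 1, 1, 4, 4, 3, 3, 2, 2, 1, 1);
    for (float v : dst)
        std::printf("%g ", v);  // prints: 5 7 13 15
    std::printf("\n");
    return 0;
}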

The MegEngine installation package already bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU vs. GPU build to choose between. To run GPU programs, make sure the machine has GPU hardware and that the driver is installed. If you would like to try deep learning development on a cloud GPU compute platform, you are welcome to visit the MegStudio platform.