
pooling.cpp

/**
 * \file dnn/test/arm_common/pooling.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "megdnn/dtype.h"
#include "megdnn/opr_param_defs.h"
#include "test/arm_common/fixture.h"

#include "test/common/pooling.h"
#include "test/common/checker.h"
#include "test/common/benchmarker.h"
#include "test/common/rng.h"

namespace megdnn {
namespace test {
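
//! Float32 pooling correctness: MAX and AVERAGE with a 3x3 window, plus MAX
//! with 4x4 and 5x5 windows, all at stride 2, swept over many input sizes
//! and paddings.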
TEST_F(ARM_COMMON, POOLING)
{
    using Param = param::Pooling;
    // clang-format off
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
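
//! Int8 max pooling, 2x2 window with 2x2 stride, padding 0 or 1.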
TEST_F(ARM_COMMON, POOLING_INT8_W2x2_S2x2)
{
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    if (ih+2*ph >= 3 && iw+2*pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
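
//! Int8 max pooling, 3x3 window with 2x2 stride, padding 0 to 2.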
TEST_F(ARM_COMMON, POOLING_INT8_W3x3_S2x2)
{
    // clang-format off
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    if (ih+2*ph >= 3 && iw+2*pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
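
//! QuantizedS8 max pooling in NCHW44 layout, 3x3 window with 2x2 stride;
//! the RNG keeps input values within half the int8 range.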
TEST_F(ARM_COMMON, POOLING_MAX_W3x3_S2x2_NCHW44)
{
    // clang-format off
    for (size_t ih: {3, 5, 10})
    for (size_t iw: {3, 5, 7, 9, 15, 20})
    for (size_t ph: {0})
    for (size_t pw: {0})
    if (ih+2*ph >= 3 && iw+2*pw >= 3)
    {
        UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng);
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}
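
//! Float16 pooling, built only when the target supports ARM FP16 vector
//! arithmetic; the epsilon is relaxed to 3e-3 to absorb half-precision
//! rounding error.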
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON, POOLING_FP16) {
    Checker<Pooling> checker(handle());
    checker.set_dtype(0, dtype::Float16{})
            .set_dtype(1, dtype::Float16{})
            .set_epsilon(3e-3);
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
    for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
    for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
        for (size_t window : {2, 3}) {
            Param param;
            param.mode = mode;
            param.window_h = param.window_w = window;
            param.stride_h = param.stride_w = 1;
            param.pad_h = param.pad_w = window / 2;
            //! test for SH == SW == 1 && FH == FW (FH == 2 || FH == 3)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            //! test for SH == SW == 2 && FH == FW == 2
            param.stride_h = param.stride_w = 2;
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        }
    }
    //! test for SH == SW == 2 && FH == FW == 3 max pooling
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph : {0, 1, 2})
    for (size_t pw : {0, 1, 2})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
    //! test for SH == SW == 2 && FH == FW == 4 max pooling
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p : {1, 2}) {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    //! test for SH == SW == 2 && FH == FW == 5 max pooling
    for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p : {1, 2}) {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
}
#endif
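
//! QuantizedS8 and Quantized8Asymm pooling over the same window/stride
//! combinations as the float tests; each dtype uses an RNG matched to its
//! value range.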
TEST_F(ARM_COMMON, POOLING_QUANTIZED) {
    Checker<Pooling> checker(handle());
    UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng2{0, UINT8_MAX >> 1};
    using Param = param::Pooling;
    for (auto type : std::vector<DType>{
                 dtype::QuantizedS8(1.1f),
                 dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
        // select the dtype under test and an RNG matching its value range
        checker.set_dtype(0, type);
        if (type.enumv() == DTypeEnum::QuantizedS8) {
            checker.set_rng(0, &rng1);
        } else {
            megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
            checker.set_rng(0, &rng2);
        }
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
        for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
            for (size_t window : {2, 3}) {
                Param param;
                param.mode = mode;
                param.window_h = param.window_w = window;
                param.stride_h = param.stride_w = 1;
                param.pad_h = param.pad_w = window / 2;
                //! test for SH == SW == 1 && FH == FW (FH == 2 || FH == 3)
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                //! test for SH == SW == 2 && FH == FW == 2
                param.stride_h = param.stride_w = 2;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
        }
        //! test for SH == SW == 2 && FH == FW == 3 max pooling
        for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
        for (size_t ph : {0, 1, 2})
        for (size_t pw : {0, 1, 2})
        if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
            param::Pooling param;
            param.mode = param::Pooling::Mode::MAX;
            param.pad_h = ph;
            param.pad_w = pw;
            param.window_h = param.window_w = 3;
            param.stride_h = param.stride_w = 2;
            checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
        }
        //! test for SH == SW == 2 && FH == FW == 4 max pooling
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t p : {1, 2}) {
            Param param;
            param.mode = Param::Mode::MAX;
            param.window_h = param.window_w = 4;
            param.stride_h = param.stride_w = 2;
            param.pad_h = param.pad_w = p;
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        }
        //! test for SH == SW == 2 && FH == FW == 5 max pooling
        for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t p : {1, 2}) {
            Param param;
            param.mode = Param::Mode::MAX;
            param.window_h = param.window_w = 5;
            param.stride_h = param.stride_w = 2;
            param.pad_h = param.pad_w = p;
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        }
    }
}
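
//! The benchmarks below time the ARM (NEON) implementation against a naive
//! CPU handle; they are built only when MEGDNN_WITH_BENCHMARK is enabled.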
#if MEGDNN_WITH_BENCHMARK
TEST_F(ARM_COMMON, BENCHMARK_POOLING_INT8_W3x3_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Int8());
        layouts.emplace_back(shapes[1], dtype::Int8());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_int(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        auto t3 = benchmarker_int.set_display(false).set_times(RUN)
                          .set_param(param).execl(layouts);
        printf("naive=%.3fms float=%.3fms, int=%.3fms\n",
               t1 / RUN, t2 / RUN, t3 / RUN);
        auto speedup = t2 / t3;
        ASSERT_GE(speedup, 2.0);
    };
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "3x3 with 2x2 stride max pooling:" << std::endl;
    run({{1, 3, 640, 480}, {}}, param);
}
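
//! Times float max pooling with a 4x4 window and 2x2 stride over several
//! shapes, reporting time and throughput for the naive and NEON handles.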
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W4x4_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount = dst_layout.total_nr_elems() *
                            param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000),
               t2 / RUN, calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 4;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "4x4 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
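
//! Same measurement as above with a 5x5 window.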
TEST_F(ARM_COMMON, BENCHMARK_POOLING_W5x5_S2x2)
{
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        auto t2 = benchmarker_float.set_display(false).set_times(RUN)
                          .set_param(param).exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount = dst_layout.total_nr_elems() *
                            param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n",
               t1 / RUN, calc_amount / (t1 / RUN * 1000),
               t2 / RUN, calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 5;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "5x5 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}
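
//! Compares float32 (exec on shapes) with float16 (execl on Float16
//! layouts) on the ARM handle and prints the fp16 speedup.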
TEST_F(ARM_COMMON, BENCHMARK_POOLING_FP16) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::Float16());
        layouts.emplace_back(shapes[1], dtype::Float16());
        Benchmarker<Pooling> benchmarker_float(handle());
        Benchmarker<Pooling> benchmarker_half(handle());
        size_t RUN = 10;
        auto tf = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes) /
                  RUN;
        auto th = benchmarker_half.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl(layouts) /
                  RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("float=%.3fms %f gflops, float16=%.3fms %f gflops speedup: %f\n",
               tf, computations / tf * 1e3, th, computations / th * 1e3,
               tf / th);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);
    for (size_t oh : {640, 128})
    for (size_t ow : {480, 112}) {
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = 1;
        param.mode = Param::Mode::AVERAGE;
        printf("3x3 with 2x2 stride average pooling.\n");
        run({{1, 3, oh, ow}, {}}, param);
        for (size_t pw : {2, 3, 4, 5}) {
            param.window_h = param.window_w = pw;
            param.stride_h = param.stride_w = 2;
            param.pad_h = param.pad_w = 1;
            param.mode = Param::Mode::MAX;
            printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
            run({{1, 3, oh, ow}, {}}, param);
        }
    }
}
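
//! Compares naive and ARM handles on the same QuantizedS8 layouts and
//! prints the int8 speedup.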
TEST_F(ARM_COMMON, BENCHMARK_POOLING_QUANTIZED) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        auto handle_naive = create_cpu_handle(2);
        TensorLayoutArray layouts;
        layouts.emplace_back(shapes[0], dtype::QuantizedS8(1.1f));
        layouts.emplace_back(shapes[1], dtype::QuantizedS8(1.1f));
        Benchmarker<Pooling> benchmarker_int(handle());
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        size_t RUN = 10;
        // run both benchmarkers on the quantized layouts so the comparison
        // is int8 vs. int8
        auto time_int = benchmarker_int.set_display(false)
                                .set_times(RUN)
                                .set_param(param)
                                .execl(layouts) /
                        RUN;
        auto time_naive = benchmarker_naive.set_display(false)
                                  .set_times(RUN)
                                  .set_param(param)
                                  .execl(layouts) /
                          RUN;
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::QuantizedS8(1.1f)}, dst_layout);
        float computations = dst_layout.total_nr_elems() * param.window_h *
                             param.window_w / (1024.f * 1024 * 1024);
        printf("naive=%.3fms %f gflops, int8=%.3fms %f gflops speedup: %f\n",
               time_naive, computations / time_naive * 1e3, time_int,
               computations / time_int * 1e3, time_naive / time_int);
    };
    Param param;
    param.window_h = param.window_w = 2;
    param.stride_h = param.stride_w = 1;
    param.pad_h = param.pad_w = 1;
    printf("2x2 with 1x1 stride max pooling:\n");
    run({{1, 3, 640, 480}, {}}, param);
    // clang-format off
    for (size_t oh : {640, 128})
    for (size_t ow : {480, 112})
    for (size_t pw : {2, 3, 4, 5}) {
        param.window_h = param.window_w = pw;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = 1;
        printf("%zux%zu with 2x2 stride max pooling:\n", pw, pw);
        run({{1, 3, oh, ow}, {}}, param);
    }
    // clang-format on
}
#endif

} // namespace test
} // namespace megdnn

// vim: syntax=cpp.doxygen

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. To run GPU programs, make sure the machine has a GPU and the driver is installed. If you would like to try deep-learning development on a cloud GPU compute platform, visit the MegStudio platform.