pooling.cpp 23 kB

#include "test/fallback/fixture.h"

#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/pooling.h"
#include "test/common/rng.h"
#include "test/common/task_record_check.h"

namespace megdnn {
namespace test {
namespace {
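// Builds (param, input shape) pairs for NCHW44 pooling: it sweeps batch, channel,
// spatial size, padding and mode, and keeps only combinations where the padded
// input covers the window and the window is larger than the padding, so every
// generated case is a valid pooling problem.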
std::vector<std::pair<param::Pooling, TensorShapeArray>> get_nchw44_pool_args(
        size_t filter, size_t stride) {
    constexpr size_t ic_step = 4;
    std::vector<std::pair<param::Pooling, TensorShapeArray>> args;
    for (size_t n : {1, 2})
        for (size_t c : {4, 8})
            for (size_t ih : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                for (size_t iw : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                    for (size_t ph : {0, 1, 2})
                        for (size_t pw : {0, 1, 2})
                            for (auto mode :
                                 {param::Pooling::Mode::MAX,
                                  param::Pooling::Mode::AVERAGE})
                                if (ih + 2 * ph >= filter && iw + 2 * pw >= filter &&
                                    filter > ph && filter > pw) {
                                    param::Pooling param;
                                    param.mode = mode;
                                    param.format = param::Pooling::Format::NCHW44;
                                    param.pad_h = ph;
                                    param.pad_w = pw;
                                    param.stride_h = param.stride_w = stride;
                                    param.window_h = param.window_w = filter;
                                    args.emplace_back(std::make_pair(
                                            param,
                                            TensorShapeArray{
                                                    {n, c / ic_step, ih, iw, ic_step},
                                                    {}}));
                                }
    return args;
}
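
// Runs every generated case through Checker against the reference implementation.
// For int8 the inputs come from a narrowed range ([INT8_MIN >> 1, INT8_MAX >> 1]),
// presumably to keep quantized results away from saturation; fp32 uses small
// integers in [-10, 10].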
void run_pooling_check(
        Handle* handle, std::vector<std::pair<param::Pooling, TensorShapeArray>> args,
        bool is_int8) {
    Checker<Pooling> checker(handle);
    UniformIntRNG rng_int8{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng_fp32{-10, 10};
    if (is_int8) {
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng_int8);
    } else {
        checker.set_rng(0, &rng_fp32);
    }
    for (auto arg : args) {
        checker.set_param(arg.first).exec(arg.second);
    }
}
}  // namespace
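
// Correctness tests: each TEST_F below checks the fallback GI (general intrinsic)
// pooling kernels against the naive reference over a grid of input shapes,
// window sizes, strides and paddings.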
TEST_F(FALLBACK_MULTI_THREADS, POOLING_GI_NCHW44_FP32) {
    for (auto filter : {2, 3, 4, 5})
        for (auto stride : {1, 2}) {
            run_pooling_check(handle(), get_nchw44_pool_args(filter, stride), false);
        }
}

TEST_F(FALLBACK, POOLING_GI) {
    using Param = param::Pooling;
    // clang-format off
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::AVERAGE_COUNT_EXCLUDE_PADDING;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 1;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    // clang-format on
}
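
// The *_RECORD variants use TaskRecordChecker, which is intended to exercise the
// task-record (kernel replay) execution path rather than the normal dispatch path.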
TEST_F(FALLBACK, POOLING_GI_RECORD) {
    using Param = param::Pooling;
    TaskRecordChecker<Pooling> checker(0);
    // clang-format off
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p: {1, 2})
    {
        Param param;
        param.mode = Param::Mode::AVERAGE_COUNT_EXCLUDE_PADDING;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 1;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
    // clang-format on
}

TEST_F(FALLBACK_MULTI_THREADS, POOLING_GI_RECORD) {
    using Param = param::Pooling;
    TaskRecordChecker<Pooling> checker(0);
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::AVERAGE;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
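
// Large-window coverage: 9x9 and 13x13 windows with heavy padding on NCHW44
// inputs, for both MAX and AVERAGE pooling.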
TEST_F(FALLBACK_MULTI_THREADS, POOLING_GI_W9_w13_NCHW44) {
    UniformIntRNG rng{-10, 10};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {20, 15})
    for (size_t iw: {15, 20})
    for (size_t kernel: {9, 13})
    for (size_t pad: {4, 6})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (kernel > pad)
    {
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = pad;
        param.pad_w = pad;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = kernel;
        checker.set_param(param).exec(TensorShapeArray{{2, 8, ih, iw, 4}, {}});
    }
    // clang-format on
}
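
// Multi-threaded correctness: the same kind of parameter grid as the
// single-threaded tests above, run on the FALLBACK_MULTI_THREADS fixture.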
TEST_F(FALLBACK_MULTI_THREADS, POOLING_GI_FALLBACK) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                Checker<Pooling> checker(handle());
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}

TEST_F(FALLBACK_MULTI_THREADS, POOLING_GI) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                Checker<Pooling> checker(handle());
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::AVERAGE;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}

#if MEGDNN_WITH_BENCHMARK
namespace {
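// Benchmarks one pooling configuration in three variants (NCHW fp32, NCHW44 int8,
// NCHW44 fp32) and reports time per run plus an approximate throughput, computed
// as output elements * window_h * window_w per run. For example, for the ResNet50
// case run(1, 64, 112, 112, 3, 2, 1, MAX) the output is 1x64x56x56, so
// calc_amount = 64 * 56 * 56 * 9 = 1,806,336 window reads per run (reported as
// "Mflops" in the printout).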
void benchmark_nchw44_fp32(Handle* handle) {
    using Param = param::Pooling;
    auto run = [&](size_t n, size_t c, size_t h, size_t w, size_t filter, size_t stride,
                   size_t pad, Param::Mode mode) {
        Param param;
        param.window_h = param.window_w = filter;
        param.stride_h = param.stride_w = stride;
        param.pad_h = param.pad_w = pad;
        param.format = Param::Format::NCHW;
        param.mode = mode;
        TensorShape nchw_shape = {n, c, h, w};
        TensorShape nchw44_shape = {n, c / 4, h, w, 4};
        TensorLayout dst_layout;
        auto opr = handle->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({nchw_shape, dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        Benchmarker<Pooling> benchmarker_float_nchw(handle);
        Benchmarker<Pooling> benchmarker_float_nchw44(handle);
        Benchmarker<Pooling> benchmarker_int_nchw44(handle);
        size_t RUN = 500;
        auto t1 = benchmarker_float_nchw.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw_shape, {}});
        param.format = Param::Format::NCHW44;
        auto t2 = benchmarker_int_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .execl({{nchw44_shape, dtype::QuantizedS8(1.0)},
                                  {{}, dtype::QuantizedS8(1.0)}});
        auto t3 = benchmarker_float_nchw44.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec({nchw44_shape, {}});
        printf("{%zu %zu %zu %zu} filter = %zu, stride = %zu pad = %zu\n"
               "nchw_fp32={%.3f ms, %.3f Mflops}, "
               "nchw44_int={%.3f ms, %.3f Mflops}, "
               "nchw44_fp32={%.3f ms, %.3f Mflops, speed_up %f}\n\n",
               n, c, h, w, filter, stride, pad, t1 / RUN,
               calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000), t3 / RUN,
               calc_amount / (t3 / RUN * 1000), t1 / t3);
    };
    // Resnet50
    run(1, 64, 112, 112, 3, 2, 1, param::Pooling::Mode::MAX);
    run(1, 2048, 7, 7, 7, 1, 0, param::Pooling::Mode::AVERAGE);
    // VGG16
    run(1, 64, 224, 224, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 128, 112, 112, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 256, 56, 56, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 28, 28, 2, 2, 0, param::Pooling::Mode::MAX);
    run(1, 512, 14, 14, 2, 2, 0, param::Pooling::Mode::MAX);
}
}  // namespace

TEST_F(FALLBACK, BENCHMARK_POOLING_GI_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}

TEST_F(FALLBACK_MULTI_THREADS, BENCHMARK_POOLING_GI_NCHW44_FP32) {
    benchmark_nchw44_fp32(handle());
}
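
// Single-config benchmarks: compare the fallback kernel against a reference handle
// from create_cpu_handle(2) (labelled "naive" in the printout) on a few large
// activations; the "neon" label in the printf appears to be carried over from the
// Arm version of this test.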
TEST_F(FALLBACK, BENCHMARK_POOLING_GI_W4x4_S2x2) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t2 = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n", t1 / RUN,
               calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 4;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "4x4 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}

TEST_F(FALLBACK, BENCHMARK_POOLING_GI_W5x5_S2x2) {
    using Param = param::Pooling;
    auto run = [&](const TensorShapeArray& shapes, Param param) {
        std::cout << "N:" << shapes[0][0] << " "
                  << "IC:" << shapes[0][1] << " "
                  << "IH:" << shapes[0][2] << " "
                  << "IW:" << shapes[0][3] << std::endl;
        auto handle_naive = create_cpu_handle(2);
        Benchmarker<Pooling> benchmarker_naive(handle_naive.get());
        Benchmarker<Pooling> benchmarker_float(handle());
        size_t RUN = 10;
        auto t1 = benchmarker_naive.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        auto t2 = benchmarker_float.set_display(false)
                          .set_times(RUN)
                          .set_param(param)
                          .exec(shapes);
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Pooling>();
        opr->param() = param;
        opr->deduce_layout({shapes[0], dtype::Float32()}, dst_layout);
        float calc_amount =
                dst_layout.total_nr_elems() * param.window_h * param.window_w;
        printf("naive={%.3fms, %.3fMflops}, neon={%.3fms, %.3fMflops}\n", t1 / RUN,
               calc_amount / (t1 / RUN * 1000), t2 / RUN,
               calc_amount / (t2 / RUN * 1000));
    };
    Param param;
    param.window_h = param.window_w = 5;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::cout << "5x5 with 2x2 stride max pooling:" << std::endl;
    run({{1, 24, 160, 128}, {}}, param);
    run({{1, 4, 240, 135}, {}}, param);
    run({{1, 32, 120, 67}, {}}, param);
    run({{1, 64, 60, 33}, {}}, param);
}

namespace {
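// Shared benchmark driver: runs the same shapes once on a multi-threaded handle and
// once on a single-threaded handle (each pinned to the given core set), then prints
// per-case times, the speedup, and the speedup normalised by the thread count.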
template <typename Opr>
void benchmark_impl(
        const typename Opr::Param& param, std::vector<SmallVector<TensorShape>> shapes,
        size_t RUNS, TaskExecutorConfig&& multi_thread_config,
        TaskExecutorConfig&& single_thread_config, DType data_type) {
    std::vector<float> multi_thread_times, single_thread_times;
    {
        auto multi_thread_handle = create_cpu_handle(0, true, &multi_thread_config);
        auto benchmarker = Benchmarker<Opr>(multi_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    {
        auto single_thread_handle = create_cpu_handle(0, true, &single_thread_config);
        auto benchmarker = Benchmarker<Opr>(single_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
    printf("core_ids:");
    for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
        printf("%zu ", multi_thread_config.affinity_core_set[i]);
    }
    printf(", Single thread core_id %zu\n", single_thread_config.affinity_core_set[0]);
    for (size_t i = 0; i < shapes.size(); i++) {
        auto shape = shapes[i];
        printf("Case: ");
        for (auto sh : shape)
            printf("%s ", sh.to_string().c_str());
        printf("%zu threads time: %f,\n single thread time: "
               "%f. speed up = %f, speedup/cores=%f\n",
               multi_thread_config.nr_thread, multi_thread_times[i],
               single_thread_times[i], single_thread_times[i] / multi_thread_times[i],
               single_thread_times[i] / multi_thread_times[i] /
                       multi_thread_config.nr_thread);
    }
}
}  // namespace
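
// 3x3, stride-2 fp32 pooling on a range of activation sizes, comparing 4-thread and
// 2-thread configurations against a single thread (the core ids are the ones passed
// to TaskExecutorConfig below).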
TEST_F(FALLBACK_MULTI_THREADS, BENCHMARK_POOLING_GI) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::vector<SmallVector<TensorShape>> shapes;
    shapes.push_back({{32, 32, 215, 215}, {}});
    shapes.push_back({{32, 32, 128, 128}, {}});
    shapes.push_back({{8, 256, 100, 100}, {}});
    shapes.push_back({{1, 256, 100, 100}, {}});
    shapes.push_back({{1, 32, 100, 100}, {}});
    shapes.push_back({{1, 256, 80, 80}, {}});
    shapes.push_back({{1, 256, 60, 60}, {}});
    shapes.push_back({{1, 256, 30, 30}, {}});
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    printf("Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n", param.window_h,
           param.window_w, param.stride_h, static_cast<int>(param.mode));
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {4, {0, 1, 2, 3}}, {1, {0}}, dtype::Float32());
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}}, dtype::Float32());
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {2, {0, 1}}, {1, {0}}, dtype::Float32());
}
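
// Quantized int8 pooling benchmark: for each (window, stride) pair and each mode,
// the same channel/spatial sizes are measured first in NCHW and then in NCHW44
// layout, so the two printouts can be compared directly.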
TEST_F(FALLBACK_MULTI_THREADS, BENCHMARK_POOLING_GI_NCHW44) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.pad_h = param.pad_w = 0;
    param.mode = Param::Mode::MAX;
    std::vector<SmallVector<TensorShape>> shapes;
    std::vector<std::vector<size_t>> filter_and_stride = {
            {2, 1}, {2, 2}, {3, 1}, {3, 2}, {4, 1}, {4, 2}, {5, 1}, {5, 2}};
    for (auto mode : {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE}) {
        for (auto filter : filter_and_stride) {
            shapes.push_back({{1, 32 * 4, 215, 215}, {}});
            shapes.push_back({{1, 32 * 4, 128, 128}, {}});
            shapes.push_back({{1, 16 * 4, 56, 56}, {}});
            param.mode = mode;
            param.window_h = param.window_w = filter[0];
            param.stride_h = param.stride_w = filter[1];
            param.format = Param::Format::NCHW;
            printf("NCHW Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(
                    param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}},
                    dtype::QuantizedS8(1.1f));
            shapes.clear();
            shapes.push_back({{1, 32, 215, 215, 4}, {}});
            shapes.push_back({{1, 32, 128, 128, 4}, {}});
            shapes.push_back({{1, 16, 56, 56, 4}, {}});
            param.format = Param::Format::NCHW44;
            printf("NCHW44 Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(
                    param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}},
                    dtype::QuantizedS8(1.1f));
            shapes.clear();
        }
    }
}
#endif
}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen