
pooling_multi_thread.cpp 23 kB

/**
 * \file dnn/test/arm_common/pooling_multi_thread.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include <vector>
#include "megdnn/dtype.h"
#include "megdnn/opr_param_defs.h"
#include "test/arm_common/fixture.h"
#include "test/common/pooling.h"
#include "test/common/checker.h"
#include "test/common/benchmarker.h"
#include "test/common/rng.h"

namespace megdnn {
namespace test {

/*********************** multi threads *********************************/
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p : {1, 2}) {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::AVERAGE;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 4;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});

        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 5;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
            checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
}
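
//! Enumerate (param, shape) cases for NCHW44-format pooling: sweep batch,
//! channel, spatial size, padding and mode, keeping only cases where the
//! window fits inside the padded input and each padding is smaller than the
//! window.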
std::vector<std::pair<param::Pooling, TensorShapeArray>> get_nchw44_pool_args(
        size_t filter, size_t stride) {
    constexpr size_t ic_step = 4;
    std::vector<std::pair<param::Pooling, TensorShapeArray>> args;
    for (size_t n : {1, 2})
        for (size_t c : {4, 8})
            for (size_t ih : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                for (size_t iw : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                    for (size_t ph : {0, 1, 2})
                        for (size_t pw : {0, 1, 2})
                            for (auto mode : {param::Pooling::Mode::MAX,
                                              param::Pooling::Mode::AVERAGE})
                                if (ih + 2 * ph >= filter &&
                                    iw + 2 * pw >= filter && filter > ph &&
                                    filter > pw) {
                                    param::Pooling param;
                                    param.mode = mode;
                                    param.format =
                                            param::Pooling::Format::NCHW44;
                                    param.pad_h = ph;
                                    param.pad_w = pw;
                                    param.stride_h = param.stride_w = stride;
                                    param.window_h = param.window_w = filter;
                                    args.emplace_back(std::make_pair(
                                            param,
                                            TensorShapeArray{{n, c / ic_step,
                                                              ih, iw, ic_step},
                                                             {}}));
                                }
    return args;
}
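
//! Run every (param, shapes) case through the checker; quantized int8 inputs
//! use a half-range int8 RNG, float inputs a small [-10, 10] integer range.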
void run_pooling_check(
        Handle* handle,
        std::vector<std::pair<param::Pooling, TensorShapeArray>> args,
        bool is_int8) {
    Checker<Pooling> checker(handle);
    UniformIntRNG rng_int8{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng_fp32{-10, 10};
    if (is_int8) {
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng_int8);
    } else {
        checker.set_rng(0, &rng_fp32);
    }
    for (auto arg : args) {
        checker.set_param(arg.first).exec(arg.second);
    }
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_NCHW44_FP32) {
    for (auto filter : {2, 3, 4, 5})
        for (auto stride : {1, 2}) {
            run_pooling_check(handle(), get_nchw44_pool_args(filter, stride),
                              false);
        }
}
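
//! The POOLING_W{2,3,4,5}x* tests below cover quantized int8 NCHW44 pooling
//! with a fixed window size, running each case with stride 1 and stride 2.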
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W3x3_NCHW44)
{
    // clang-format off
    for (size_t ih: {3, 5, 10})
    for (size_t iw: {3, 5, 7, 9, 15, 20})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W2x2_NCHW44)
{
    // clang-format off
    for (size_t ih: {2, 5, 10, 17})
    for (size_t iw: {2, 6, 8, 16, 26})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 2 && iw + 2 * pw >= 2)
    {
        UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W4x4_NCHW44)
{
    // clang-format off
    for (size_t ih: {4, 10, 18, 25, 30})
    for (size_t iw: {4, 12, 17, 20, 25})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 4 && iw + 2 * pw >= 4)
    {
        UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 4;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W5x5_NCHW44)
{
    // clang-format off
    for (size_t ih: {5, 9, 19, 20, 39})
    for (size_t iw: {5, 12, 23, 27, 39})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 5 && iw + 2 * pw >= 5)
    {
        UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 5;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}
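
//! Plain NCHW int8 max pooling: 3x3 and 2x2 windows with stride 2.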
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W3x3_S2x2)
{
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W2x2_S2x2)
{
    for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
    for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        Checker<Pooling> checker(handle());
        checker.set_dtype(0, dtype::Int8());
        param::Pooling param;
        param.mode = param::Pooling::Mode::MAX;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 2;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 3, ih, iw}, {}});
    }
}
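
//! FP16 pooling is only compiled when the target supports half-precision
//! vector arithmetic.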
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FP16) {
    Checker<Pooling> checker(handle());
    checker.set_dtype(0, dtype::Float16{})
            .set_dtype(1, dtype::Float16{})
            .set_epsilon(3e-3);
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
            for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                for (size_t window : {2, 3}) {
                    Param param;
                    param.mode = mode;
                    param.window_h = param.window_w = window;
                    param.stride_h = param.stride_w = 1;
                    param.pad_h = param.pad_w = window / 2;
                    //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    //! test for SH == SW == 2 && FH == FW == 2
                    param.stride_h = param.stride_w = 2;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
            }

    //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1, 2})
                for (size_t pw : {0, 1, 2})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 3;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }

    //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
    for (size_t ih :
         {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw :
             {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }

    //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
    for (size_t ih :
         {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw :
             {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
#endif

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_QUANTIZED) {
    Checker<Pooling> checker(handle());
    UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng2{0, UINT8_MAX >> 1};
    using Param = param::Pooling;
    for (auto type : std::vector<DType>{
                 dtype::QuantizedS8(1.1f),
                 dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
        if (type.enumv() == DTypeEnum::QuantizedS8) {
            checker.set_rng(0, &rng1);
        } else {
            megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
            checker.set_rng(0, &rng2);
        }
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
                for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                    for (size_t window : {2, 3}) {
                        Param param;
                        param.mode = mode;
                        param.window_h = param.window_w = window;
                        param.stride_h = param.stride_w = 1;
                        param.pad_h = param.pad_w = window / 2;
                        //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                        //! test for SH == SW == 2 && FH == FW == 2
                        param.stride_h = param.stride_w = 2;
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    }
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
        for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
            for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
                for (size_t ph : {0, 1, 2})
                    for (size_t pw : {0, 1, 2})
                        if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                            param::Pooling param;
                            param.mode = param::Pooling::Mode::MAX;
                            param.pad_h = ph;
                            param.pad_w = pw;
                            param.window_h = param.window_w = 3;
                            param.stride_h = param.stride_w = 2;
                            checker.set_param(param).exec(
                                    TensorShapeArray{{2, 3, ih, iw}, {}});
                        }

        //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
        for (size_t ih :
             {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw :
                 {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 4;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
        for (size_t ih :
             {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw :
                 {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 5;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
    }
}
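
//! 3x3 stride-2 max pooling over a wide range of input sizes (the FALLBACK
//! suffix indicates these cases are expected to hit the non-specialized
//! pooling implementation).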
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FALLBACK) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
    for (size_t p : {1, 2}) {
        Param param;
        param.mode = Param::Mode::MAX;
        param.window_h = param.window_w = 3;
        param.stride_h = param.stride_w = 2;
        param.pad_h = param.pad_w = p;
        Checker<Pooling> checker(handle());
        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
    }
}

#if MEGDNN_WITH_BENCHMARK
namespace {
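//! Benchmark Opr over the given shapes on a multi-thread handle and a
//! single-thread handle, then print per-case average times, speed-up and
//! speed-up per core.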
template <typename Opr>
void benchmark_impl(const typename Opr::Param& param,
                    std::vector<SmallVector<TensorShape>> shapes, size_t RUNS,
                    TaskExecutorConfig&& multi_thread_config,
                    TaskExecutorConfig&& single_thread_config,
                    DType data_type) {
    std::vector<float> multi_thread_times, single_thread_times;
    {
        auto multi_thread_handle =
                create_cpu_handle(0, true, &multi_thread_config);
        auto benchmarker = Benchmarker<Opr>(multi_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    {
        auto single_thread_handle =
                create_cpu_handle(0, true, &single_thread_config);
        auto benchmarker = Benchmarker<Opr>(single_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
    printf("core_ids:");
    for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
        printf("%zu ", multi_thread_config.affinity_core_set[i]);
    }
    printf(", Single thread core_id %zu\n",
           single_thread_config.affinity_core_set[0]);
    for (size_t i = 0; i < shapes.size(); i++) {
        auto shape = shapes[i];
        printf("Case: ");
        for (auto sh : shape)
            printf("%s ", sh.to_string().c_str());
        printf("%zu threads time: %f,\n single thread time: "
               "%f. speed up = %f, speedup/cores=%f\n",
               multi_thread_config.nr_thread, multi_thread_times[i],
               single_thread_times[i],
               single_thread_times[i] / multi_thread_times[i],
               single_thread_times[i] / multi_thread_times[i] /
                       multi_thread_config.nr_thread);
    }
}
}  // namespace

TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::vector<SmallVector<TensorShape>> shapes;
    shapes.push_back({{32, 32, 215, 215}, {}});
    shapes.push_back({{32, 32, 128, 128}, {}});
    shapes.push_back({{8, 256, 100, 100}, {}});
    shapes.push_back({{1, 256, 100, 100}, {}});
    shapes.push_back({{1, 32, 100, 100}, {}});
    shapes.push_back({{1, 256, 80, 80}, {}});
    shapes.push_back({{1, 256, 60, 60}, {}});
    shapes.push_back({{1, 256, 30, 30}, {}});
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    printf("Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n", param.window_h,
           param.window_w, param.stride_h, static_cast<int>(param.mode));
    benchmark_impl<Pooling>(param, shapes, RUNS, {4, {0, 1, 2, 3}}, {1, {0}},
                            dtype::Float32());
    benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}},
                            dtype::Float32());
    benchmark_impl<Pooling>(param, shapes, RUNS, {2, {0, 1}}, {1, {0}},
                            dtype::Float32());
}

TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING_NCHW44) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.pad_h = param.pad_w = 0;
    param.mode = Param::Mode::MAX;
    std::vector<SmallVector<TensorShape>> shapes;
    std::vector<std::vector<size_t>> filter_and_stride = {
            {2, 1}, {2, 2}, {3, 1}, {3, 2}, {4, 1}, {4, 2}, {5, 1}, {5, 2}};
    for (auto mode :
         {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE}) {
        for (auto filter : filter_and_stride) {
            shapes.push_back({{1, 32 * 4, 215, 215}, {}});
            shapes.push_back({{1, 32 * 4, 128, 128}, {}});
            shapes.push_back({{1, 16 * 4, 56, 56}, {}});
            param.mode = mode;
            param.window_h = param.window_w = filter[0];
            param.stride_h = param.stride_w = filter[1];
            param.format = Param::Format::NCHW;
            printf("NCHW Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}},
                                    {1, {4}}, dtype::QuantizedS8(1.1f));
            shapes.clear();
            shapes.push_back({{1, 32, 215, 215, 4}, {}});
            shapes.push_back({{1, 32, 128, 128, 4}, {}});
            shapes.push_back({{1, 16, 56, 56, 4}, {}});
            param.format = Param::Format::NCHW44;
            printf("NCHW44 Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}},
                                    {1, {4}}, dtype::QuantizedS8(1.1f));
            shapes.clear();
        }
    }
}
#endif

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen
