You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

pooling_multi_thread.cpp 24 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610
  1. /**
  2. * \file dnn/test/arm_common/pooling_multi_thread.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include <vector>
  12. #include "megdnn/dtype.h"
  13. #include "megdnn/opr_param_defs.h"
  14. #include "test/arm_common/fixture.h"
  15. #include "test/common/pooling.h"
  16. #include "test/common/checker.h"
  17. #include "test/common/benchmarker.h"
  18. #include "test/common/rng.h"
  19. namespace megdnn {
  20. namespace test {
  21. /*********************** multi threads *********************************/
  22. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING) {
  23. using Param = param::Pooling;
  24. for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  25. for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  26. for (size_t p: {1, 2})
  27. {
  28. Param param;
  29. param.mode = Param::Mode::MAX;
  30. param.window_h = param.window_w = 3;
  31. param.stride_h = param.stride_w = 2;
  32. param.pad_h = param.pad_w = p;
  33. Checker<Pooling> checker(handle());
  34. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  35. param.mode = Param::Mode::AVERAGE;
  36. param.window_h = param.window_w = 3;
  37. param.stride_h = param.stride_w = 2;
  38. param.pad_h = param.pad_w = p;
  39. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  40. param.mode = Param::Mode::MAX;
  41. param.window_h = param.window_w = 4;
  42. param.stride_h = param.stride_w = 2;
  43. param.pad_h = param.pad_w = p;
  44. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  45. param.mode = Param::Mode::MAX;
  46. param.window_h = param.window_w = 5;
  47. param.stride_h = param.stride_w = 2;
  48. param.pad_h = param.pad_w = p;
  49. if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
  50. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  51. }
  52. }
  53. std::vector<std::pair<param::Pooling, TensorShapeArray>> get_nchw44_pool_args(
  54. size_t filter, size_t stride) {
  55. constexpr size_t ic_step = 4;
  56. std::vector<std::pair<param::Pooling, TensorShapeArray>> args;
  57. for (size_t n : {1, 2})
  58. for (size_t c : {4, 8})
  59. for (size_t ih : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
  60. for (size_t iw : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
  61. for (size_t ph : {0, 1, 2})
  62. for (size_t pw : {0, 1, 2})
  63. for (auto mode : {param::Pooling::Mode::MAX,
  64. param::Pooling::Mode::AVERAGE})
  65. if (ih + 2 * ph >= filter &&
  66. iw + 2 * pw >= filter && filter > ph &&
  67. filter > pw) {
  68. param::Pooling param;
  69. param.mode = mode;
  70. param.format =
  71. param::Pooling::Format::NCHW44;
  72. param.pad_h = ph;
  73. param.pad_w = pw;
  74. param.stride_h = param.stride_w = stride;
  75. param.window_h = param.window_w = filter;
  76. args.emplace_back(std::make_pair(
  77. param,
  78. TensorShapeArray{{n, c / ic_step,
  79. ih, iw, ic_step},
  80. {}}));
  81. }
  82. return args;
  83. }
  84. void run_pooling_check(
  85. Handle* handle,
  86. std::vector<std::pair<param::Pooling, TensorShapeArray>> args,
  87. bool is_int8) {
  88. Checker<Pooling> checker(handle);
  89. UniformIntRNG rng_int8{INT8_MIN >> 1, INT8_MAX >> 1};
  90. UniformIntRNG rng_fp32{-10, 10};
  91. if (is_int8) {
  92. checker.set_dtype(0, dtype::QuantizedS8(1.1f));
  93. checker.set_rng(0, &rng_int8);
  94. } else {
  95. checker.set_rng(0, &rng_fp32);
  96. }
  97. for (auto arg : args) {
  98. checker.set_param(arg.first).exec(arg.second);
  99. }
  100. }
  101. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_NCHW44_FP32) {
  102. for (auto filter : {2, 3, 4, 5})
  103. for (auto stride : {1, 2}) {
  104. run_pooling_check(handle(), get_nchw44_pool_args(filter, stride),
  105. false);
  106. }
  107. }
  108. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W9_w13_NCHW44)
  109. {
  110. UniformIntRNG rng{-10, 10};
  111. Checker<Pooling> checker(handle());
  112. checker.set_rng(0, &rng);
  113. // clang-format off
  114. for (size_t ih: {20, 15})
  115. for (size_t iw: {15, 20})
  116. for (size_t kernel: {9, 13})
  117. for (size_t pad: {4, 6})
  118. for(auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
  119. if (kernel > pad)
  120. {
  121. param::Pooling param;
  122. param.mode = mode;
  123. param.format = param::Pooling::Format::NCHW44;
  124. param.pad_h = pad;
  125. param.pad_w = pad;
  126. param.stride_h = param.stride_w = 1;
  127. param.window_h = param.window_w = kernel ;
  128. checker.set_param(param).exec(TensorShapeArray{{2, 8, ih, iw, 4}, {}});
  129. }
  130. // clang-format on
  131. }
  132. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W3x3_NCHW44)
  133. {
  134. UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
  135. Checker<Pooling> checker(handle());
  136. checker.set_rng(0, &rng);
  137. // clang-format off
  138. for (size_t ih: {3, 5, 10})
  139. for (size_t iw: {3, 5, 7, 9, 15, 20})
  140. for (size_t ph: {0, 1, 2})
  141. for (size_t pw: {0, 1, 2})
  142. for(auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
  143. for(auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
  144. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  145. {
  146. checker.set_dtype(0, data_type);
  147. param::Pooling param;
  148. param.mode = mode;
  149. param.format = param::Pooling::Format::NCHW44;
  150. param.pad_h = ph;
  151. param.pad_w = pw;
  152. param.stride_h = param.stride_w = 1;
  153. param.window_h = param.window_w = 3;
  154. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  155. param.stride_h = param.stride_w = 2;
  156. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  157. }
  158. // clang-format on
  159. }
  160. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W2x2_NCHW44)
  161. {
  162. UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
  163. Checker<Pooling> checker(handle());
  164. checker.set_rng(0, &rng);
  165. // clang-format off
  166. for (size_t ih: {2, 5, 10, 17})
  167. for (size_t iw: {2, 6, 8, 16, 26})
  168. for (size_t ph: {0, 1})
  169. for (size_t pw: {0, 1})
  170. for(auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
  171. for(auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
  172. if (ih+2*ph >= 2 && iw+2*pw >= 2)
  173. {
  174. checker.set_dtype(0, data_type);
  175. checker.set_dtype(1, data_type);
  176. param::Pooling param;
  177. param.mode = mode;
  178. param.format = param::Pooling::Format::NCHW44;
  179. param.pad_h = ph;
  180. param.pad_w = pw;
  181. param.stride_h = param.stride_w = 1;
  182. param.window_h = param.window_w = 2;
  183. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  184. param.stride_h = param.stride_w = 2;
  185. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  186. }
  187. // clang-format on
  188. }
  189. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W4x4_NCHW44)
  190. {
  191. UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
  192. Checker<Pooling> checker(handle());
  193. checker.set_rng(0, &rng);
  194. // clang-format off
  195. for (size_t ih: {4, 10, 18, 25, 30})
  196. for (size_t iw: {4, 12, 17, 20, 25})
  197. for (size_t ph: {0, 1, 2})
  198. for (size_t pw: {0, 1, 2})
  199. for(auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
  200. for(auto mode: {param::Pooling::Mode::MAX,param::Pooling::Mode::AVERAGE})
  201. if (ih+2*ph >= 4 && iw+2*pw >= 4)
  202. {
  203. checker.set_dtype(0, data_type);
  204. param::Pooling param;
  205. param.mode = mode;
  206. param.format = param::Pooling::Format::NCHW44;
  207. param.pad_h = ph;
  208. param.pad_w = pw;
  209. param.stride_h = param.stride_w = 1;
  210. param.window_h = param.window_w = 4;
  211. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  212. param.stride_h = param.stride_w = 2;
  213. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  214. }
  215. // clang-format on
  216. }
  217. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W5x5_NCHW44)
  218. {
  219. UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
  220. Checker<Pooling> checker(handle());
  221. checker.set_rng(0, &rng);
  222. // clang-format off
  223. for (size_t ih: {5, 9, 19, 20, 39})
  224. for (size_t iw: {5, 12, 23, 27, 39})
  225. for (size_t ph: {0, 1, 2})
  226. for (size_t pw: {0, 1, 2})
  227. for(auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
  228. for(auto mode: {param::Pooling::Mode::MAX,param::Pooling::Mode::AVERAGE})
  229. if (ih+2*ph >= 5 && iw+2*pw >= 5)
  230. {
  231. checker.set_dtype(0, data_type);
  232. param::Pooling param;
  233. param.mode = mode;
  234. param.format = param::Pooling::Format::NCHW44;
  235. param.pad_h = ph;
  236. param.pad_w = pw;
  237. param.stride_h = param.stride_w = 1;
  238. param.window_h = param.window_w = 5;
  239. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  240. param.stride_h = param.stride_w = 2;
  241. checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
  242. }
  243. // clang-format on
  244. }
  245. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W3x3_S2x2)
  246. {
  247. for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
  248. for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
  249. for (size_t ph: {0, 1, 2})
  250. for (size_t pw: {0, 1, 2})
  251. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  252. {
  253. Checker<Pooling> checker(handle());
  254. checker.set_dtype(0, dtype::Int8());
  255. param::Pooling param;
  256. param.mode = param::Pooling::Mode::MAX;
  257. param.pad_h = ph;
  258. param.pad_w = pw;
  259. param.stride_h = param.stride_w = 2;
  260. param.window_h = param.window_w = 3;
  261. checker.set_param(param).exec(TensorShapeArray{
  262. {2, 3, ih, iw}, {}});
  263. }
  264. }
  265. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W2x2_S2x2)
  266. {
  267. for (size_t ih: {2, 3, 7, 13, 52, 53, 54, 55})
  268. for (size_t iw: {2, 3, 6, 14, 53, 54, 55, 56})
  269. for (size_t ph: {0, 1})
  270. for (size_t pw: {0, 1})
  271. if (ih+2*ph >= 3 && iw+2*pw >= 3)
  272. {
  273. Checker<Pooling> checker(handle());
  274. checker.set_dtype(0, dtype::Int8());
  275. param::Pooling param;
  276. param.mode = param::Pooling::Mode::MAX;
  277. param.pad_h = ph;
  278. param.pad_w = pw;
  279. param.stride_h = param.stride_w = 2;
  280. param.window_h = param.window_w = 2;
  281. checker.set_param(param).exec(TensorShapeArray{
  282. {2, 3, ih, iw}, {}});
  283. }
  284. }
  285. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  286. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FP16) {
  287. Checker<Pooling> checker(handle());
  288. checker.set_dtype(0, dtype::Float16{})
  289. .set_dtype(1, dtype::Float16{})
  290. .set_epsilon(3e-3);
  291. using Param = param::Pooling;
  292. for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
  293. for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
  294. for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
  295. for (size_t window : {2, 3}) {
  296. Param param;
  297. param.mode = mode;
  298. param.window_h = param.window_w = window;
  299. param.stride_h = param.stride_w = 1;
  300. param.pad_h = param.pad_w = window / 2;
  301. //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH
  302. //! == 3)
  303. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  304. //! test for SH = SW = 2 && FH = FW = 2
  305. param.stride_h = param.stride_w = 2;
  306. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  307. }
  308. }
  309. //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
  310. for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
  311. for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
  312. for (size_t ph : {0, 1, 2})
  313. for (size_t pw : {0, 1, 2})
  314. if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
  315. param::Pooling param;
  316. param.mode = param::Pooling::Mode::MAX;
  317. param.pad_h = ph;
  318. param.pad_w = pw;
  319. param.stride_h = param.stride_w = 2;
  320. param.window_h = param.window_w = 3;
  321. checker.set_param(param).exec(
  322. TensorShapeArray{{2, 3, ih, iw}, {}});
  323. }
  324. //! test for SH == 2 && SW == 2 && FH = FW = 4 max pooling
  325. for (size_t ih :
  326. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  327. for (size_t iw :
  328. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  329. for (size_t p : {1, 2}) {
  330. Param param;
  331. param.mode = Param::Mode::MAX;
  332. param.window_h = param.window_w = 4;
  333. param.stride_h = param.stride_w = 2;
  334. param.pad_h = param.pad_w = p;
  335. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  336. }
  337. //! test for SH == 2 && SW == 2 && FH = FW = 5 max pooling
  338. for (size_t ih :
  339. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  340. for (size_t iw :
  341. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  342. for (size_t p : {1, 2}) {
  343. Param param;
  344. param.mode = Param::Mode::MAX;
  345. param.window_h = param.window_w = 5;
  346. param.stride_h = param.stride_w = 2;
  347. param.pad_h = param.pad_w = p;
  348. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  349. }
  350. }
  351. #endif
  352. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_QUANTIZED) {
  353. Checker<Pooling> checker(handle());
  354. UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
  355. UniformIntRNG rng2{0, UINT8_MAX >> 1};
  356. using Param = param::Pooling;
  357. for (auto type : std::vector<DType>{
  358. dtype::QuantizedS8(1.1f),
  359. dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
  360. if (type.enumv() == DTypeEnum::QuantizedS8) {
  361. checker.set_rng(0, &rng1);
  362. } else {
  363. megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
  364. checker.set_rng(0, &rng2);
  365. }
  366. for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
  367. for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
  368. for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
  369. for (size_t window : {2, 3}) {
  370. Param param;
  371. param.mode = mode;
  372. param.window_h = param.window_w = window;
  373. param.stride_h = param.stride_w = 1;
  374. param.pad_h = param.pad_w = window / 2;
  375. //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 ||
  376. //! FH
  377. //! == 3)
  378. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  379. //! test for SH = SW = 2 && FH = FW = 2
  380. param.stride_h = param.stride_w = 2;
  381. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  382. }
  383. }
  384. //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
  385. for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
  386. for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
  387. for (size_t ph : {0, 1, 2})
  388. for (size_t pw : {0, 1, 2})
  389. if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
  390. param::Pooling param;
  391. param.mode = param::Pooling::Mode::MAX;
  392. param.pad_h = ph;
  393. param.pad_w = pw;
  394. param.window_h = param.window_w = 3;
  395. param.stride_h = param.stride_w = 2;
  396. checker.set_param(param).exec(
  397. TensorShapeArray{{2, 3, ih, iw}, {}});
  398. }
  399. //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
  400. for (size_t ih :
  401. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  402. for (size_t iw :
  403. {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  404. for (size_t p : {1, 2}) {
  405. Param param;
  406. param.mode = Param::Mode::MAX;
  407. param.window_h = param.window_w = 4;
  408. param.stride_h = param.stride_w = 2;
  409. param.pad_h = param.pad_w = p;
  410. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  411. }
  412. //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
  413. for (size_t ih :
  414. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  415. for (size_t iw :
  416. {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  417. for (size_t p : {1, 2}) {
  418. Param param;
  419. param.mode = Param::Mode::MAX;
  420. param.window_h = param.window_w = 5;
  421. param.stride_h = param.stride_w = 2;
  422. param.pad_h = param.pad_w = p;
  423. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  424. }
  425. }
  426. }
  427. TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FALLBACK) {
  428. using Param = param::Pooling;
  429. for (size_t ih: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  430. for (size_t iw: {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
  431. for (size_t p: {1, 2})
  432. {
  433. Param param;
  434. param.mode = Param::Mode::MAX;
  435. param.window_h = param.window_w = 3;
  436. param.stride_h = param.stride_w = 2;
  437. param.pad_h = param.pad_w = p;
  438. Checker<Pooling> checker(handle());
  439. checker.set_param(param).exec({{2, 3, ih, iw}, {}});
  440. }
  441. }
  442. #if MEGDNN_WITH_BENCHMARK
  443. namespace {
  444. template <typename Opr>
  445. void benchmark_impl(const typename Opr::Param& param,
  446. std::vector<SmallVector<TensorShape>> shapes, size_t RUNS,
  447. TaskExecutorConfig&& multi_thread_config,
  448. TaskExecutorConfig&& single_thread_config,
  449. DType data_type) {
  450. std::vector<float> multi_thread_times, single_thread_times;
  451. {
  452. auto multi_thread_hanle =
  453. create_cpu_handle(0, true, &multi_thread_config);
  454. auto benchmarker = Benchmarker<Opr>(multi_thread_hanle.get());
  455. benchmarker.set_times(RUNS).set_display(false).set_param(param);
  456. benchmarker.set_dtype(0, data_type);
  457. for (auto shape : shapes) {
  458. multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
  459. }
  460. }
  461. {
  462. auto single_thread_handle =
  463. create_cpu_handle(0, true, &single_thread_config);
  464. auto benchmarker = Benchmarker<Opr>(single_thread_handle.get());
  465. benchmarker.set_times(RUNS).set_display(false).set_param(param);
  466. benchmarker.set_dtype(0, data_type);
  467. for (auto shape : shapes) {
  468. single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
  469. }
  470. }
  471. printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
  472. printf("core_ids:");
  473. for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
  474. printf("%zu ", multi_thread_config.affinity_core_set[i]);
  475. }
  476. printf(", Single thread core_id %zu\n",
  477. single_thread_config.affinity_core_set[0]);
  478. for (size_t i = 0; i < shapes.size(); i++) {
  479. auto shape = shapes[i];
  480. printf("Case: ");
  481. for (auto sh : shape)
  482. printf("%s ", sh.to_string().c_str());
  483. printf("%zu threads time: %f,\n single thread time: "
  484. "%f. spead up = %f, speedup/cores=%f\n",
  485. multi_thread_config.nr_thread, multi_thread_times[i],
  486. single_thread_times[i],
  487. single_thread_times[i] / multi_thread_times[i],
  488. single_thread_times[i] / multi_thread_times[i] /
  489. multi_thread_config.nr_thread);
  490. }
  491. }
  492. } // namespace
  493. TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING) {
  494. constexpr size_t RUNS = 50;
  495. using Param = param::Pooling;
  496. Param param;
  497. param.window_h = param.window_w = 3;
  498. param.stride_h = param.stride_w = 2;
  499. param.pad_h = param.pad_w = 1;
  500. std::vector<SmallVector<TensorShape>> shapes;
  501. shapes.push_back({{32, 32, 215, 215}, {}});
  502. shapes.push_back({{32, 32, 128, 128}, {}});
  503. shapes.push_back({{8, 256, 100, 100}, {}});
  504. shapes.push_back({{1, 256, 100, 100}, {}});
  505. shapes.push_back({{1, 32, 100, 100}, {}});
  506. shapes.push_back({{1, 256, 80, 80}, {}});
  507. shapes.push_back({{1, 256, 60, 60}, {}});
  508. shapes.push_back({{1, 256, 30, 30}, {}});
  509. param.window_h = param.window_w = 3;
  510. param.stride_h = param.stride_w = 2;
  511. param.pad_h = param.pad_w = 1;
  512. printf("Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n", param.window_h,
  513. param.window_w, param.stride_h, static_cast<int>(param.mode));
  514. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {0, 1, 2, 3}}, {1, {0}}, dtype::Float32());
  515. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}}, dtype::Float32());
  516. benchmark_impl<Pooling>(param, shapes, RUNS, {2, {0, 1}}, {1, {0}}, dtype::Float32());
  517. }
  518. TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING_NCHW44) {
  519. constexpr size_t RUNS = 50;
  520. using Param = param::Pooling;
  521. Param param;
  522. param.pad_h = param.pad_w = 0;
  523. param.mode = Param::Mode::MAX;
  524. std::vector<SmallVector<TensorShape>> shapes;
  525. std::vector<std::vector<size_t>> filter_and_stride = {
  526. {2, 1}, {2, 2}, {3, 1}, {3, 2}, {4, 1}, {4, 2}, {5, 1}, {5, 2}};
  527. for (auto mode :
  528. {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE}) {
  529. for (auto filter : filter_and_stride) {
  530. shapes.push_back({{1, 32 * 4, 215, 215}, {}});
  531. shapes.push_back({{1, 32 * 4, 128, 128}, {}});
  532. shapes.push_back({{1, 16 * 4, 56, 56}, {}});
  533. param.mode = mode;
  534. param.window_h = param.window_w = filter[0];
  535. param.stride_h = param.stride_w = filter[1];
  536. param.format = Param::Format::NCHW;
  537. printf("NCHW Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
  538. param.window_h, param.window_h, param.stride_h,
  539. static_cast<int>(param.mode));
  540. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}},
  541. {1, {4}}, dtype::QuantizedS8(1.1f));
  542. shapes.clear();
  543. shapes.push_back({{1, 32, 215, 215, 4}, {}});
  544. shapes.push_back({{1, 32, 128, 128, 4}, {}});
  545. shapes.push_back({{1, 16, 56, 56, 4}, {}});
  546. param.format = Param::Format::NCHW44;
  547. printf("NCHW44 Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
  548. param.window_h, param.window_w, param.stride_h,
  549. static_cast<int>(param.mode));
  550. benchmark_impl<Pooling>(param, shapes, RUNS, {4, {4, 5, 6, 7}},
  551. {1, {4}}, dtype::QuantizedS8(1.1f));
  552. shapes.clear();
  553. }
  554. }
  555. }
  556. #endif
  557. } // namespace test
  558. } // namespace megdnn
  559. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台