
pooling_multi_thread.cpp 25 kB

/**
 * \file dnn/test/arm_common/pooling_multi_thread.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include <vector>
#include "megdnn/dtype.h"
#include "megdnn/opr_param_defs.h"
#include "test/arm_common/fixture.h"
#include "test/common/benchmarker.h"
#include "test/common/checker.h"
#include "test/common/pooling.h"
#include "test/common/rng.h"

namespace megdnn {
namespace test {

/*********************** multi threads *********************************/
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                Checker<Pooling> checker(handle());
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::AVERAGE;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                if (ih + p * 2 >= 5 && iw + p * 2 >= 5)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
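
//! Build (param, shape) pairs covering NCHW44 pooling for the given window
//! size and stride: MAX and AVERAGE modes, paddings 0-2, and inputs packed as
//! {N, C/4, H, W, 4}; cases where the window cannot fit the padded input are
//! skipped.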
std::vector<std::pair<param::Pooling, TensorShapeArray>> get_nchw44_pool_args(
        size_t filter, size_t stride) {
    constexpr size_t ic_step = 4;
    std::vector<std::pair<param::Pooling, TensorShapeArray>> args;
    for (size_t n : {1, 2})
        for (size_t c : {4, 8})
            for (size_t ih : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                for (size_t iw : {3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13})
                    for (size_t ph : {0, 1, 2})
                        for (size_t pw : {0, 1, 2})
                            for (auto mode :
                                 {param::Pooling::Mode::MAX,
                                  param::Pooling::Mode::AVERAGE})
                                if (ih + 2 * ph >= filter && iw + 2 * pw >= filter &&
                                    filter > ph && filter > pw) {
                                    param::Pooling param;
                                    param.mode = mode;
                                    param.format = param::Pooling::Format::NCHW44;
                                    param.pad_h = ph;
                                    param.pad_w = pw;
                                    param.stride_h = param.stride_w = stride;
                                    param.window_h = param.window_w = filter;
                                    args.emplace_back(std::make_pair(
                                            param,
                                            TensorShapeArray{
                                                    {n, c / ic_step, ih, iw, ic_step},
                                                    {}}));
                                }
    return args;
}
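
//! Run the checker over a list of (param, shape) pairs. For int8 cases the
//! input dtype is set to QuantizedS8 with a narrowed int8 RNG; otherwise a
//! small-range integer RNG is used for the float input.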
void run_pooling_check(
        Handle* handle, std::vector<std::pair<param::Pooling, TensorShapeArray>> args,
        bool is_int8) {
    Checker<Pooling> checker(handle);
    UniformIntRNG rng_int8{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng_fp32{-10, 10};
    if (is_int8) {
        checker.set_dtype(0, dtype::QuantizedS8(1.1f));
        checker.set_rng(0, &rng_int8);
    } else {
        checker.set_rng(0, &rng_fp32);
    }
    for (auto arg : args) {
        checker.set_param(arg.first).exec(arg.second);
    }
}
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_NCHW44_FP32) {
    for (auto filter : {2, 3, 4, 5})
        for (auto stride : {1, 2}) {
            run_pooling_check(handle(), get_nchw44_pool_args(filter, stride), false);
        }
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W9_w13_NCHW44) {
    UniformIntRNG rng{-10, 10};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {20, 15})
    for (size_t iw: {15, 20})
    for (size_t kernel: {9, 13})
    for (size_t pad: {4, 6})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (kernel > pad)
    {
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = pad;
        param.pad_w = pad;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = kernel;
        checker.set_param(param).exec(TensorShapeArray{{2, 8, ih, iw, 4}, {}});
    }
    // clang-format on
}
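
//! The tests below pin the pooling window to a fixed size (2x2 through 5x5)
//! in NCHW44 format and sweep input sizes, paddings, modes and dtypes with
//! both stride 1 and stride 2.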
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W3x3_NCHW44) {
    UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {3, 5, 10})
    for (size_t iw: {3, 5, 7, 9, 15, 20})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    for (auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3)
    {
        checker.set_dtype(0, data_type);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 3;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W2x2_NCHW44) {
    UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {2, 5, 10, 17})
    for (size_t iw: {2, 6, 8, 16, 26})
    for (size_t ph: {0, 1})
    for (size_t pw: {0, 1})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    for (auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
    if (ih + 2 * ph >= 2 && iw + 2 * pw >= 2)
    {
        checker.set_dtype(0, data_type);
        checker.set_dtype(1, data_type);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W4x4_NCHW44) {
    UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {4, 10, 18, 25, 30})
    for (size_t iw: {4, 12, 17, 20, 25})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 4 && iw + 2 * pw >= 4)
    {
        checker.set_dtype(0, data_type);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 4;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_W5x5_NCHW44) {
    UniformIntRNG rng{INT8_MIN >> 1, INT8_MAX >> 1};
    Checker<Pooling> checker(handle());
    checker.set_rng(0, &rng);
    // clang-format off
    for (size_t ih: {5, 9, 19, 20, 39})
    for (size_t iw: {5, 12, 23, 27, 39})
    for (size_t ph: {0, 1, 2})
    for (size_t pw: {0, 1, 2})
    for (auto data_type: SmallVector<DType>{dtype::QuantizedS8(1.1f), dtype::Int8()})
    for (auto mode: {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE})
    if (ih + 2 * ph >= 5 && iw + 2 * pw >= 5)
    {
        checker.set_dtype(0, data_type);
        param::Pooling param;
        param.mode = mode;
        param.format = param::Pooling::Format::NCHW44;
        param.pad_h = ph;
        param.pad_w = pw;
        param.stride_h = param.stride_w = 1;
        param.window_h = param.window_w = 5;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
        param.stride_h = param.stride_w = 2;
        checker.set_param(param).exec(TensorShapeArray{{2, 2, ih, iw, 4}, {}});
    }
    // clang-format on
}
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W3x3_S2x2) {
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1, 2})
                for (size_t pw : {0, 1, 2})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        Checker<Pooling> checker(handle());
                        checker.set_dtype(0, dtype::Int8());
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 3;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }
}

TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_INT8_W2x2_S2x2) {
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1})
                for (size_t pw : {0, 1})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        Checker<Pooling> checker(handle());
                        checker.set_dtype(0, dtype::Int8());
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 2;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }
}
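
//! FP16 pooling is tested only when the toolchain provides ARM FP16 vector
//! arithmetic (__ARM_FEATURE_FP16_VECTOR_ARITHMETIC).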
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FP16) {
    Checker<Pooling> checker(handle());
    checker.set_dtype(0, dtype::Float16{})
            .set_dtype(1, dtype::Float16{})
            .set_epsilon(3e-3);
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23})
            for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                for (size_t window : {2, 3}) {
                    Param param;
                    param.mode = mode;
                    param.window_h = param.window_w = window;
                    param.stride_h = param.stride_w = 1;
                    param.pad_h = param.pad_w = window / 2;
                    //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    //! test for SH = SW = 2 && FH = FW = 2
                    param.stride_h = param.stride_w = 2;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
            }

    //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
    for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
        for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
            for (size_t ph : {0, 1, 2})
                for (size_t pw : {0, 1, 2})
                    if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                        param::Pooling param;
                        param.mode = param::Pooling::Mode::MAX;
                        param.pad_h = ph;
                        param.pad_w = pw;
                        param.stride_h = param.stride_w = 2;
                        param.window_h = param.window_w = 3;
                        checker.set_param(param).exec(
                                TensorShapeArray{{2, 3, ih, iw}, {}});
                    }

    //! test for SH == 2 && SW == 2 && FH = FW = 4 max pooling
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 4;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }

    //! test for SH == 2 && SW == 2 && FH = FW = 5 max pooling
    for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 5;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}
#endif
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_QUANTIZED) {
    Checker<Pooling> checker(handle());
    UniformIntRNG rng1{INT8_MIN >> 1, INT8_MAX >> 1};
    UniformIntRNG rng2{0, UINT8_MAX >> 1};
    using Param = param::Pooling;
    for (auto type : std::vector<DType>{
                 dtype::QuantizedS8(1.1f),
                 dtype::Quantized8Asymm(1.1f, static_cast<uint8_t>(3))}) {
        if (type.enumv() == DTypeEnum::QuantizedS8) {
            checker.set_rng(0, &rng1);
        } else {
            megdnn_assert(type.enumv() == DTypeEnum::Quantized8Asymm);
            checker.set_rng(0, &rng2);
        }
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
            for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 33, 49})
                for (auto mode : {Param::Mode::AVERAGE, Param::Mode::MAX}) {
                    for (size_t window : {2, 3}) {
                        Param param;
                        param.mode = mode;
                        param.window_h = param.window_w = window;
                        param.stride_h = param.stride_w = 1;
                        param.pad_h = param.pad_w = window / 2;
                        //! test for SH == 1 && SW == 1 && FH == FW (FH == 2 || FH == 3)
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                        //! test for SH = SW = 2 && FH = FW = 2
                        param.stride_h = param.stride_w = 2;
                        checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                    }
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 3 max pooling
        for (size_t ih : {2, 3, 7, 13, 52, 53, 54, 55})
            for (size_t iw : {2, 3, 6, 14, 53, 54, 55, 56})
                for (size_t ph : {0, 1, 2})
                    for (size_t pw : {0, 1, 2})
                        if (ih + 2 * ph >= 3 && iw + 2 * pw >= 3) {
                            param::Pooling param;
                            param.mode = param::Pooling::Mode::MAX;
                            param.pad_h = ph;
                            param.pad_w = pw;
                            param.window_h = param.window_w = 3;
                            param.stride_h = param.stride_w = 2;
                            checker.set_param(param).exec(
                                    TensorShapeArray{{2, 3, ih, iw}, {}});
                        }

        //! test for SH == 2 && SW == 2 && FH == FW == 4 max pooling
        for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw :
                 {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 4;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }

        //! test for SH == 2 && SW == 2 && FH == FW == 5 max pooling
        for (size_t ih : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t iw : {3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
                for (size_t p : {1, 2}) {
                    Param param;
                    param.mode = Param::Mode::MAX;
                    param.window_h = param.window_w = 5;
                    param.stride_h = param.stride_w = 2;
                    param.pad_h = param.pad_w = p;
                    checker.set_param(param).exec({{2, 3, ih, iw}, {}});
                }
    }
}
TEST_F(ARM_COMMON_MULTI_THREADS, POOLING_FALLBACK) {
    using Param = param::Pooling;
    for (size_t ih : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
        for (size_t iw : {2, 3, 5, 7, 11, 13, 17, 19, 23, 24, 25, 26, 27, 28, 29, 30})
            for (size_t p : {1, 2}) {
                Param param;
                param.mode = Param::Mode::MAX;
                param.window_h = param.window_w = 3;
                param.stride_h = param.stride_w = 2;
                param.pad_h = param.pad_w = p;
                Checker<Pooling> checker(handle());
                checker.set_param(param).exec({{2, 3, ih, iw}, {}});
            }
}

#if MEGDNN_WITH_BENCHMARK
namespace {
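
//! Run the same shapes through a multi-thread handle and a single-thread
//! handle, then print per-case timings and the resulting speedup.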
template <typename Opr>
void benchmark_impl(
        const typename Opr::Param& param, std::vector<SmallVector<TensorShape>> shapes,
        size_t RUNS, TaskExecutorConfig&& multi_thread_config,
        TaskExecutorConfig&& single_thread_config, DType data_type) {
    std::vector<float> multi_thread_times, single_thread_times;
    {
        auto multi_thread_handle = create_cpu_handle(0, true, &multi_thread_config);
        auto benchmarker = Benchmarker<Opr>(multi_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            multi_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    {
        auto single_thread_handle = create_cpu_handle(0, true, &single_thread_config);
        auto benchmarker = Benchmarker<Opr>(single_thread_handle.get());
        benchmarker.set_times(RUNS).set_display(false).set_param(param);
        benchmarker.set_dtype(0, data_type);
        for (auto shape : shapes) {
            single_thread_times.push_back(benchmarker.exec(shape) / RUNS);
        }
    }
    printf("Benchmark : Multi threads %zu, ", multi_thread_config.nr_thread);
    printf("core_ids:");
    for (size_t i = 0; i < multi_thread_config.affinity_core_set.size(); i++) {
        printf("%zu ", multi_thread_config.affinity_core_set[i]);
    }
    printf(", Single thread core_id %zu\n", single_thread_config.affinity_core_set[0]);
    for (size_t i = 0; i < shapes.size(); i++) {
        auto shape = shapes[i];
        printf("Case: ");
        for (auto sh : shape)
            printf("%s ", sh.to_string().c_str());
        printf("%zu threads time: %f,\n single thread time: "
               "%f. speed up = %f, speedup/cores=%f\n",
               multi_thread_config.nr_thread, multi_thread_times[i],
               single_thread_times[i], single_thread_times[i] / multi_thread_times[i],
               single_thread_times[i] / multi_thread_times[i] /
                       multi_thread_config.nr_thread);
    }
}
}  // namespace
TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    std::vector<SmallVector<TensorShape>> shapes;
    shapes.push_back({{32, 32, 215, 215}, {}});
    shapes.push_back({{32, 32, 128, 128}, {}});
    shapes.push_back({{8, 256, 100, 100}, {}});
    shapes.push_back({{1, 256, 100, 100}, {}});
    shapes.push_back({{1, 32, 100, 100}, {}});
    shapes.push_back({{1, 256, 80, 80}, {}});
    shapes.push_back({{1, 256, 60, 60}, {}});
    shapes.push_back({{1, 256, 30, 30}, {}});
    param.window_h = param.window_w = 3;
    param.stride_h = param.stride_w = 2;
    param.pad_h = param.pad_w = 1;
    printf("Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n", param.window_h,
           param.window_w, param.stride_h, static_cast<int>(param.mode));
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {4, {0, 1, 2, 3}}, {1, {0}}, dtype::Float32());
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}}, dtype::Float32());
    benchmark_impl<Pooling>(
            param, shapes, RUNS, {2, {0, 1}}, {1, {0}}, dtype::Float32());
}
TEST_F(ARM_COMMON_BENCHMARK_MULTI_THREADS, BENCHMARK_POOLING_NCHW44) {
    constexpr size_t RUNS = 50;
    using Param = param::Pooling;
    Param param;
    param.pad_h = param.pad_w = 0;
    param.mode = Param::Mode::MAX;
    std::vector<SmallVector<TensorShape>> shapes;
    std::vector<std::vector<size_t>> filter_and_stride = {
            {2, 1}, {2, 2}, {3, 1}, {3, 2}, {4, 1}, {4, 2}, {5, 1}, {5, 2}};
    for (auto mode : {param::Pooling::Mode::MAX, param::Pooling::Mode::AVERAGE}) {
        for (auto filter : filter_and_stride) {
            shapes.push_back({{1, 32 * 4, 215, 215}, {}});
            shapes.push_back({{1, 32 * 4, 128, 128}, {}});
            shapes.push_back({{1, 16 * 4, 56, 56}, {}});
            param.mode = mode;
            param.window_h = param.window_w = filter[0];
            param.stride_h = param.stride_w = filter[1];
            param.format = Param::Format::NCHW;
            printf("NCHW Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(
                    param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}},
                    dtype::QuantizedS8(1.1f));
            shapes.clear();
            shapes.push_back({{1, 32, 215, 215, 4}, {}});
            shapes.push_back({{1, 32, 128, 128, 4}, {}});
            shapes.push_back({{1, 16, 56, 56, 4}, {}});
            param.format = Param::Format::NCHW44;
            printf("NCHW44 Benchmark POOLING kernel:%d*%d stride:%d,mode %d\n",
                   param.window_h, param.window_w, param.stride_h,
                   static_cast<int>(param.mode));
            benchmark_impl<Pooling>(
                    param, shapes, RUNS, {4, {4, 5, 6, 7}}, {1, {4}},
                    dtype::QuantizedS8(1.1f));
            shapes.clear();
        }
    }
}
#endif

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen
