You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

convolution.cpp 48 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212
  1. /**
  2. * \file dnn/test/arm_common/convolution.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "test/arm_common/fixture.h"
  12. #include "test/common/benchmarker.h"
  13. #include "test/common/checker.h"
  14. #include "test/common/convolution.h"
  15. #include "test/common/timer.h"
  16. using namespace megdnn;
  17. using namespace test;
  18. using Param = param::Convolution;
  19. #if MGB_ENABLE_DOT
  20. TEST_F(ARM_COMMON, CONVOLUTION_BACKWARD_DATA_INT8_INT8_INT32) {
  21. Checker<ConvolutionBackwardData> checker(handle());
  22. using Param = ConvolutionBackwardData::Param;
  23. Param param;
  24. auto run = [&](size_t n, size_t ic, size_t oh, size_t ow, size_t oc, size_t fh,
  25. size_t fw, size_t stride, size_t ph, size_t pw, size_t group = 1) {
  26. param.pad_h = ph;
  27. param.pad_w = pw;
  28. param.stride_h = param.stride_w = stride;
  29. TensorLayout diff = TensorLayout{{n, oc * group, oh, ow}, dtype::Int8()};
  30. TensorLayout grad;
  31. TensorLayout filter;
  32. if (group == 1) {
  33. param.sparse = Param::Sparse::DENSE;
  34. filter = {{oc, ic, fh, fw}, dtype::Int8()};
  35. } else {
  36. param.sparse = Param::Sparse::GROUP;
  37. filter = {{group, oc, ic, fh, fw}, dtype::Int8()};
  38. }
  39. // TensorLayout grad;
  40. {
  41. auto opr = handle()->create_operator<ConvolutionBackwardData>();
  42. opr->param() = param;
  43. opr->deduce_layout(filter, diff, grad);
  44. }
  45. if (stride == 1) {
  46. checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
  47. "AARCH32_I8x8x32_DECONV_STRIDE1"));
  48. } else {
  49. checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
  50. "AARCH32_I8x8x32_DECONV_STRIDE2"));
  51. }
  52. checker.set_param(param)
  53. .set_dtype(0, dtype::Int8())
  54. .set_dtype(1, dtype::Int8())
  55. .set_dtype(2, dtype::Int32());
  56. checker.exec(TensorLayoutArray{filter, diff, grad});
  57. };
  58. // clang-format off
  59. for (size_t f : {2, 3, 5, 7})
  60. for (size_t ih = 1; ih < f+1; ++ih)
  61. for (size_t iw = 1; iw < 8*f+1; ++iw)
  62. for (size_t s : {1, 2})
  63. for (size_t ph : {f/2, f-1})
  64. for (size_t pw : {f / 2, f - 1})
  65. if (f >= ph + 1 && f >= pw + 1 && (ih - 1) * s + f > 2 * ph &&
  66. (iw - 1) * s + f > 2 * pw) {
  67. run(2, 3, ih, iw, 2, f, f, s, ph, pw, 1);
  68. }
  69. // clang-format on
  70. }
  71. TEST_F(ARM_COMMON, CONVOLUTION_BACKWARD_DATA_QUINT8) {
  72. Checker<ConvolutionBackwardData> checker(handle());
  73. using Param = ConvolutionBackwardData::Param;
  74. Param param;
  75. auto run = [&](size_t n, size_t ic, size_t oh, size_t ow, size_t oc, size_t fh,
  76. size_t fw, size_t stride, size_t ph, size_t pw, size_t group = 1) {
  77. param.pad_h = ph;
  78. param.pad_w = pw;
  79. param.stride_h = param.stride_w = stride;
  80. TensorLayout diff = TensorLayout{
  81. {n, oc * group, oh, ow}, dtype::Quantized8Asymm(1.3f, (uint8_t)129)};
  82. TensorLayout grad;
  83. TensorLayout filter;
  84. if (group == 1) {
  85. param.sparse = Param::Sparse::DENSE;
  86. filter = {{oc, ic, fh, fw}, dtype::Quantized8Asymm(1.2f, (uint8_t)127)};
  87. } else {
  88. param.sparse = Param::Sparse::GROUP;
  89. filter = {
  90. {group, oc, ic, fh, fw},
  91. dtype::Quantized8Asymm(1.2f, (uint8_t)127)};
  92. }
  93. // TensorLayout grad;
  94. {
  95. auto opr = handle()->create_operator<ConvolutionBackwardData>();
  96. opr->param() = param;
  97. opr->deduce_layout(filter, diff, grad);
  98. }
  99. NormalRNG rng(128.f);
  100. if (stride == 1) {
  101. checker.set_before_exec_callback(
  102. AlgoChecker<ConvolutionBackwardData>("ARM_COMMON_QUINT8_DIRECT_"
  103. "DECONV_STRIDE1"));
  104. } else {
  105. checker.set_before_exec_callback(
  106. AlgoChecker<ConvolutionBackwardData>("ARM_COMMON_QUINT8_DIRECT_"
  107. "DECONV_STRIDE2"));
  108. }
  109. checker.set_param(param)
  110. .set_dtype(0, dtype::Quantized8Asymm(1.2f, (uint8_t)127))
  111. .set_dtype(1, dtype::Quantized8Asymm(1.3f, (uint8_t)129))
  112. .set_dtype(2, {});
  113. checker.set_rng(0, &rng).set_rng(1, &rng);
  114. checker.exec(TensorLayoutArray{filter, diff, grad});
  115. };
  116. // clang-format off
  117. for (size_t f : {2, 3, 5, 7})
  118. for (size_t ih = 1; ih < f+1; ++ih)
  119. for (size_t iw = 1; iw < 8*f+1; ++iw)
  120. for (size_t s : {1, 2})
  121. for (size_t ph : {f/2, f-1})
  122. for (size_t pw : {f/2, f-1})
  123. if (f >= ph + 1 && f >= pw + 1 && (ih - 1) * s + f > 2 * ph &&
  124. (iw - 1) * s + f > 2 * pw) {
  125. run(2, 2, ih, iw, 2, f, f, s, ph, pw, 1);
  126. }
  127. // clang-format on
  128. }
  129. #endif
  130. #if MEGDNN_WITH_BENCHMARK
  131. #if MGB_ENABLE_DOT
  132. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_STRIDE1_I8x8x32_WITHDOTPROD) {
  133. using namespace convolution;
  134. using Param = param::Convolution;
  135. std::vector<TestArg> args;
  136. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel,
  137. size_t stride) {
  138. Param param;
  139. param.stride_h = stride;
  140. param.stride_w = stride;
  141. param.pad_h = kernel / 2;
  142. param.pad_w = kernel / 2;
  143. args.emplace_back(
  144. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  145. };
  146. for (size_t kernel : {2, 3, 5, 7}) {
  147. for (size_t ic : {1, 8, 16, 32, 64}) {
  148. for (size_t oc : {1, 8, 16, 32, 64}) {
  149. run(oc, ic, 56, 56, kernel, 1);
  150. run(oc, ic, 128, 128, kernel, 1);
  151. run(oc, ic, 256, 256, kernel, 1);
  152. }
  153. }
  154. }
  155. constexpr size_t RUN = 50;
  156. Benchmarker<Convolution> benchmark(handle());
  157. benchmark.set_before_exec_callback(
  158. AlgoChecker<Convolution>("CONVOLUTION_DEFAULT_ARMDOTS8STRD1"));
  159. benchmark.set_dtype(0, dtype::Int8())
  160. .set_dtype(1, dtype::Int8())
  161. .set_dtype(2, dtype::Int32());
  162. benchmark.set_display(false);
  163. benchmark.set_times(RUN);
  164. Benchmarker<Convolution> benchmark_float(handle());
  165. benchmark_float.set_display(false);
  166. benchmark_float.set_times(RUN);
  167. for (auto&& arg : args) {
  168. TensorLayout dst_layout;
  169. auto opr = handle()->create_operator<Convolution>();
  170. opr->param() = arg.param;
  171. opr->deduce_layout(
  172. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  173. dst_layout);
  174. //! dst.nr_elems * IC * FH * FW * 2
  175. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  176. arg.filter[2] * arg.filter[3] * 2.0 /
  177. (1024 * 1024 * 1024) * 1e3;
  178. auto used_int =
  179. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  180. auto used_float =
  181. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  182. RUN;
  183. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  184. "%f\n",
  185. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  186. computations / used_int, used_float, computations / used_float,
  187. used_float / used_int);
  188. }
  189. }
  190. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_STRIDE2_I8x8x32_WITHDOTPROD) {
  191. using namespace convolution;
  192. using Param = param::Convolution;
  193. std::vector<TestArg> args;
  194. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel,
  195. size_t stride) {
  196. Param param;
  197. param.stride_h = stride;
  198. param.stride_w = stride;
  199. param.pad_h = kernel / 2;
  200. param.pad_w = kernel / 2;
  201. args.emplace_back(
  202. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  203. };
  204. for (size_t kernel : {2, 3, 5, 7}) {
  205. for (size_t ic : {1, 8, 16, 32, 64}) {
  206. for (size_t oc : {1, 8, 16, 32, 64}) {
  207. run(oc, ic, 56, 56, kernel, 2);
  208. run(oc, ic, 128, 128, kernel, 2);
  209. run(oc, ic, 256, 256, kernel, 2);
  210. }
  211. }
  212. }
  213. constexpr size_t RUN = 10;
  214. Benchmarker<Convolution> benchmark(handle());
  215. benchmark.set_before_exec_callback(
  216. AlgoChecker<Convolution>("CONVOLUTION_DEFAULT_ARMDOTS8STRD2"));
  217. benchmark.set_dtype(0, dtype::Int8())
  218. .set_dtype(1, dtype::Int8())
  219. .set_dtype(2, dtype::Int32());
  220. benchmark.set_display(false);
  221. benchmark.set_times(RUN);
  222. Benchmarker<Convolution> benchmark_float(handle());
  223. benchmark_float.set_display(false);
  224. benchmark_float.set_times(RUN);
  225. for (auto&& arg : args) {
  226. TensorLayout dst_layout;
  227. auto opr = handle()->create_operator<Convolution>();
  228. opr->param() = arg.param;
  229. opr->deduce_layout(
  230. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  231. dst_layout);
  232. //! dst.nr_elems * IC * FH * FW * 2
  233. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  234. arg.filter[2] * arg.filter[3] * 2.0 /
  235. (1024 * 1024 * 1024) * 1e3;
  236. auto used_int =
  237. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  238. auto used_float =
  239. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  240. RUN;
  241. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  242. "%f\n",
  243. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  244. computations / used_int, used_float, computations / used_float,
  245. used_float / used_int);
  246. }
  247. }
  248. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_STRIDE1_QUINT8_WITHDOTPROD) {
  249. using namespace convolution;
  250. using Param = param::Convolution;
  251. std::vector<TestArg> args;
  252. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel,
  253. size_t stride) {
  254. Param param;
  255. param.stride_h = stride;
  256. param.stride_w = stride;
  257. param.pad_h = kernel / 2;
  258. param.pad_w = kernel / 2;
  259. args.emplace_back(
  260. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  261. };
  262. for (size_t kernel : {2, 3, 5, 7}) {
  263. for (size_t ic : {1, 8, 16, 32, 64}) {
  264. for (size_t oc : {1, 8, 16, 32, 64}) {
  265. run(oc, ic, 56, 56, kernel, 1);
  266. run(oc, ic, 128, 128, kernel, 1);
  267. run(oc, ic, 256, 256, kernel, 1);
  268. }
  269. }
  270. }
  271. constexpr size_t RUN = 50;
  272. Benchmarker<Convolution> benchmark(handle());
  273. benchmark.set_dtype(0, dtype::Quantized8Asymm(1.2f, (uint8_t)129))
  274. .set_dtype(1, dtype::Quantized8Asymm(1.3f, (uint8_t)127))
  275. .set_dtype(2, {});
  276. benchmark.set_display(false);
  277. benchmark.set_times(RUN);
  278. benchmark.set_before_exec_callback(
  279. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_ARMDOTU8STRD1"));
  280. Benchmarker<Convolution> benchmark_float(handle());
  281. benchmark_float.set_display(false);
  282. benchmark_float.set_times(RUN);
  283. for (auto&& arg : args) {
  284. TensorLayout dst_layout;
  285. auto opr = handle()->create_operator<Convolution>();
  286. opr->param() = arg.param;
  287. opr->deduce_layout(
  288. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  289. dst_layout);
  290. //! dst.nr_elems * IC * FH * FW * 2
  291. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  292. arg.filter[2] * arg.filter[3] * 2.0 /
  293. (1024 * 1024 * 1024) * 1e3;
  294. auto used_int =
  295. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  296. auto used_float =
  297. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  298. RUN;
  299. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  300. "%f\n",
  301. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  302. computations / used_int, used_float, computations / used_float,
  303. used_float / used_int);
  304. }
  305. }
  306. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_STRIDE2_QUINT8_WITHDOTPROD) {
  307. using namespace convolution;
  308. using Param = param::Convolution;
  309. std::vector<TestArg> args;
  310. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel,
  311. size_t stride) {
  312. Param param;
  313. param.stride_h = stride;
  314. param.stride_w = stride;
  315. param.pad_h = kernel / 2;
  316. param.pad_w = kernel / 2;
  317. args.emplace_back(
  318. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  319. };
  320. for (size_t kernel : {2, 3, 5, 7}) {
  321. for (size_t ic : {1, 8, 16, 32, 64}) {
  322. for (size_t oc : {1, 8, 16, 32, 64}) {
  323. run(oc, ic, 56, 56, kernel, 2);
  324. run(oc, ic, 128, 128, kernel, 2);
  325. run(oc, ic, 256, 256, kernel, 2);
  326. }
  327. }
  328. }
  329. constexpr size_t RUN = 50;
  330. Benchmarker<Convolution> benchmark(handle());
  331. benchmark.set_dtype(0, dtype::Quantized8Asymm(1.2f, (uint8_t)129))
  332. .set_dtype(1, dtype::Quantized8Asymm(1.3f, (uint8_t)127))
  333. .set_dtype(2, {});
  334. benchmark.set_display(false);
  335. benchmark.set_times(RUN);
  336. benchmark.set_before_exec_callback(
  337. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_ARMDOTU8STRD2"));
  338. Benchmarker<Convolution> benchmark_float(handle());
  339. benchmark_float.set_display(false);
  340. benchmark_float.set_times(RUN);
  341. for (auto&& arg : args) {
  342. TensorLayout dst_layout;
  343. auto opr = handle()->create_operator<Convolution>();
  344. opr->param() = arg.param;
  345. opr->deduce_layout(
  346. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  347. dst_layout);
  348. //! dst.nr_elems * IC * FH * FW * 2
  349. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  350. arg.filter[2] * arg.filter[3] * 2.0 /
  351. (1024 * 1024 * 1024) * 1e3;
  352. auto used_int =
  353. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  354. auto used_float =
  355. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  356. RUN;
  357. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  358. "%f\n",
  359. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  360. computations / used_int, used_float, computations / used_float,
  361. used_float / used_int);
  362. }
  363. }
  364. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_BACKWARD_DATA_INT8_INT8_INT32) {
  365. using Param = ConvolutionBackwardData::Param;
  366. auto run = [&](const TensorLayoutArray& tensors, Param param) {
  367. Benchmarker<ConvolutionBackwardData> benchmarker(handle());
  368. size_t RUN = 50;
  369. auto time = benchmarker.set_display(false)
  370. .set_dtype(0, dtype::Int8{})
  371. .set_dtype(1, dtype::Int8{})
  372. .set_dtype(2, dtype::Int32{})
  373. .set_times(RUN)
  374. .set_param(param)
  375. .exec(tensors);
  376. size_t OC = tensors[0][0];
  377. size_t FH = tensors[0][2];
  378. size_t FW = tensors[0][3];
  379. float computations = tensors[2].total_nr_elems() * OC * FH * FW * 2.0 /
  380. (1024 * 1024 * 1024) * 1e3;
  381. printf("time = %f \n perf= %f gops\n", time, computations * RUN / time);
  382. };
  383. auto profile = [&](size_t n, size_t ic, size_t oh, size_t ow, size_t oc, size_t fh,
  384. size_t fw, size_t s) {
  385. Param param;
  386. param.stride_h = param.stride_w = s;
  387. printf("oc: %zd ic: %zd w: %zd h: %zd kernel_size: %zd sreide: %zd\n", oc, ic,
  388. ow, oh, fh, s);
  389. TensorLayout diff = TensorLayout{{n, oc, oh, ow}, dtype::Int8()};
  390. TensorLayout filter = TensorLayout{{oc, ic, fh, fw}, dtype::Int8()};
  391. TensorLayout grad;
  392. {
  393. auto opr = handle()->create_operator<ConvolutionBackwardData>();
  394. opr->param() = param;
  395. opr->deduce_layout(filter, diff, grad);
  396. }
  397. run(TensorLayoutArray{filter, diff, grad}, param);
  398. };
  399. profile(1, 3, 120, 120, 2, 3, 3, 1);
  400. profile(1, 3, 60, 60, 2, 3, 3, 2);
  401. profile(1, 3, 224, 224, 2, 5, 5, 1);
  402. profile(1, 3, 112, 112, 2, 5, 5, 2);
  403. profile(1, 3, 224, 224, 2, 7, 7, 1);
  404. profile(1, 3, 112, 112, 2, 7, 7, 2);
  405. }
  406. #endif
  407. TEST_F(ARM_COMMON, BENCHMARK_CHANWISE_CONVOLUTION) {
  408. auto run = [&](const TensorShapeArray& shapes, Param param) {
  409. auto handle_naive = create_cpu_handle(2);
  410. Benchmarker<Convolution> benchmarker_naive(handle_naive.get()),
  411. benchmarker_float(handle()), benchmarker_int(handle());
  412. benchmarker_int.set_dtype(0, dtype::Int8());
  413. benchmarker_int.set_dtype(1, dtype::Int8());
  414. benchmarker_int.set_dtype(2, dtype::Int16());
  415. size_t RUN = 10;
  416. auto tfloat = benchmarker_float.set_display(false)
  417. .set_times(RUN)
  418. .set_param(param)
  419. .exec(shapes);
  420. auto tnaive = benchmarker_naive.set_display(false)
  421. .set_times(RUN)
  422. .set_param(param)
  423. .exec(shapes);
  424. auto iparam = param;
  425. auto tint = benchmarker_int.set_display(false)
  426. .set_times(RUN)
  427. .set_param(iparam)
  428. .exec(shapes);
  429. float int_float_ratio = static_cast<float>(tfloat) / tint;
  430. printf("naive=%.3fms float=%.3fms int=%.3fms, int/float=%.3f\n", tnaive / RUN,
  431. tfloat / RUN, tint / RUN, int_float_ratio);
  432. EXPECT_GE(int_float_ratio, 1.5);
  433. };
  434. Param param;
  435. param.mode = Param::Mode::CROSS_CORRELATION;
  436. param.sparse = Param::Sparse::GROUP;
  437. run({{2, 12, 200, 100}, {12, 2, 1, 5, 5}, {}}, param);
  438. run({{10, 24, 28, 28}, {24, 1, 1, 3, 3}, {}}, param);
  439. param.stride_h = 2;
  440. param.stride_w = 2;
  441. param.pad_h = 1;
  442. param.pad_w = 1;
  443. run({{2, 12, 200, 100}, {12, 2, 1, 5, 5}, {}}, param);
  444. run({{10, 24, 28, 28}, {24, 1, 1, 3, 3}, {}}, param);
  445. }
  446. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8X8X32_STRD1_WITHOUT_DOTPROD) {
  447. // have to remove preferred restrict in usable func before run the benchmark
  448. using namespace convolution;
  449. std::vector<TestArg> args;
  450. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
  451. if (w + 2 * p < kernel || h + 2 * p < kernel)
  452. return;
  453. param::Convolution param;
  454. param.stride_h = 1;
  455. param.stride_w = 1;
  456. param.pad_h = p;
  457. param.pad_w = p;
  458. args.emplace_back(
  459. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  460. };
  461. // compare to float direct conv here,
  462. // but float direct conv don't support 7x7.
  463. for (size_t kernel : {2, 3, 5})
  464. for (size_t ic : {1, 8, 16, 32, 64})
  465. for (size_t oc : {1, 8, 16, 32, 64})
  466. for (size_t p : {0, 1, 2, 3}) {
  467. run(oc, ic, 56, 56, kernel, p);
  468. run(oc, ic, 128, 128, kernel, p);
  469. run(oc, ic, 256, 256, kernel, p);
  470. }
  471. constexpr size_t RUN = 50;
  472. Benchmarker<Convolution> benchmark(handle());
  473. benchmark.set_dtype(0, dtype::Int8())
  474. .set_dtype(1, dtype::Int8())
  475. .set_dtype(2, dtype::Int32());
  476. benchmark.set_display(false);
  477. benchmark.set_times(RUN);
  478. benchmark.set_before_exec_callback(
  479. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_S8STRD1"));
  480. Benchmarker<Convolution> benchmark_float(handle());
  481. benchmark_float.set_display(false);
  482. benchmark_float.set_times(RUN);
  483. benchmark_float.set_before_exec_callback(
  484. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_F32STRD1"));
  485. for (auto&& arg : args) {
  486. TensorLayout dst_layout;
  487. auto opr = handle()->create_operator<Convolution>();
  488. opr->param() = arg.param;
  489. opr->deduce_layout(
  490. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  491. dst_layout);
  492. //! dst.nr_elems * IC * FH * FW * 2
  493. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  494. arg.filter[2] * arg.filter[3] * 2.0 /
  495. (1024 * 1024 * 1024) * 1e3;
  496. auto used_int =
  497. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  498. auto used_float =
  499. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  500. RUN;
  501. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  502. "%f\n",
  503. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  504. computations / used_int, used_float, computations / used_float,
  505. used_float / used_int);
  506. }
  507. }
  508. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8X8X32_STRD2_WITHOUT_DOTPROD) {
  509. // have to remove preferred restrict in usable func before run the benchmark
  510. using namespace convolution;
  511. std::vector<TestArg> args;
  512. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
  513. if (w + 2 * p < kernel || h + 2 * p < kernel)
  514. return;
  515. param::Convolution param;
  516. param.stride_h = 2;
  517. param.stride_w = 2;
  518. param.pad_h = p;
  519. param.pad_w = p;
  520. args.emplace_back(
  521. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  522. };
  523. for (size_t kernel : {2, 3, 5, 7})
  524. for (size_t ic : {1, 8, 16, 32, 64})
  525. for (size_t oc : {1, 8, 16, 32, 64})
  526. for (size_t p : {0, 1, 2, 3}) {
  527. run(oc, ic, 56, 56, kernel, p);
  528. run(oc, ic, 128, 128, kernel, p);
  529. run(oc, ic, 256, 256, kernel, p);
  530. }
  531. constexpr size_t RUN = 50;
  532. Benchmarker<Convolution> benchmark(handle());
  533. benchmark.set_dtype(0, dtype::Int8())
  534. .set_dtype(1, dtype::Int8())
  535. .set_dtype(2, dtype::Int32());
  536. benchmark.set_display(false);
  537. benchmark.set_times(RUN);
  538. benchmark.set_before_exec_callback(
  539. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_S8STRD2"));
  540. Benchmarker<Convolution> benchmark_float(handle());
  541. benchmark_float.set_display(false);
  542. benchmark_float.set_times(RUN);
  543. #if MEGDNN_AARCH64
  544. benchmark_float.set_before_exec_callback(
  545. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_ARMV8F32STRD2"));
  546. #else
  547. benchmark_float.set_before_exec_callback(
  548. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_F32STRD2"));
  549. #endif
  550. for (auto&& arg : args) {
  551. TensorLayout dst_layout;
  552. auto opr = handle()->create_operator<Convolution>();
  553. opr->param() = arg.param;
  554. opr->deduce_layout(
  555. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  556. dst_layout);
  557. //! dst.nr_elems * IC * FH * FW * 2
  558. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  559. arg.filter[2] * arg.filter[3] * 2.0 /
  560. (1024 * 1024 * 1024) * 1e3;
  561. auto used_int =
  562. benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
  563. auto used_float =
  564. benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  565. RUN;
  566. printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
  567. "%f\n",
  568. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
  569. computations / used_int, used_float, computations / used_float,
  570. used_float / used_int);
  571. }
  572. }
  573. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8X8X32_STRD1_WITHOUT_DOTPROD_TO_MATMUL) {
  574. // have to remove preferred restrict in usable func before run the benchmark
  575. using namespace convolution;
  576. std::vector<TestArg> args;
  577. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
  578. if (w + 2 * p < kernel || h + 2 * p < kernel)
  579. return;
  580. param::Convolution param;
  581. param.stride_h = 1;
  582. param.stride_w = 1;
  583. param.pad_h = p;
  584. param.pad_w = p;
  585. args.emplace_back(
  586. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  587. };
  588. for (size_t kernel : {2, 3, 5, 7})
  589. for (size_t p : {0, 1, 2})
  590. for (size_t ic : {1, 3, 4, 8, 12, 16, 32, 48, 64})
  591. for (size_t oc : {1, 3, 4, 8, 12, 16, 32, 48, 64})
  592. for (size_t size : {56, 128, 256}) {
  593. run(oc, ic, size, size, kernel, p);
  594. }
  595. constexpr size_t RUN = 50;
  596. Benchmarker<Convolution> benchmark_conv(handle());
  597. benchmark_conv.set_dtype(0, dtype::Int8())
  598. .set_dtype(1, dtype::Int8())
  599. .set_dtype(2, dtype::Int32());
  600. benchmark_conv.set_display(false);
  601. benchmark_conv.set_times(RUN);
  602. benchmark_conv.set_before_exec_callback(
  603. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_S8STRD1"));
  604. Benchmarker<Convolution> benchmark_matmul(handle());
  605. benchmark_matmul.set_dtype(0, dtype::Int8())
  606. .set_dtype(1, dtype::Int8())
  607. .set_dtype(2, dtype::Int32());
  608. benchmark_matmul.set_display(false);
  609. benchmark_matmul.set_times(RUN);
  610. for (auto&& arg : args) {
  611. TensorLayout dst_layout;
  612. auto opr = handle()->create_operator<Convolution>();
  613. opr->param() = arg.param;
  614. opr->deduce_layout(
  615. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  616. dst_layout);
  617. //! dst.nr_elems * IC * FH * FW * 2
  618. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  619. arg.filter[2] * arg.filter[3] * 2.0 /
  620. (1024 * 1024 * 1024) * 1e3;
  621. auto used_conv =
  622. benchmark_conv.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  623. RUN;
  624. auto used_matmul =
  625. benchmark_matmul.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  626. RUN;
  627. printf("%s %s: conv: %f ms %f Gflops matmul: %f ms %f GFlops speedup: "
  628. "%f\n",
  629. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_conv,
  630. computations / used_conv, used_matmul, computations / used_matmul,
  631. used_matmul / used_conv);
  632. }
  633. }
  634. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8X8X32_STRD2_WITHOUT_DOTPROD_TO_MATMUL) {
  635. // have to remove preferred restrict in usable func before run the benchmark
  636. using namespace convolution;
  637. std::vector<TestArg> args;
  638. auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
  639. if (w + 2 * p < kernel || h + 2 * p < kernel)
  640. return;
  641. param::Convolution param;
  642. param.stride_h = 2;
  643. param.stride_w = 2;
  644. param.pad_h = p;
  645. param.pad_w = p;
  646. args.emplace_back(
  647. param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
  648. };
  649. for (size_t kernel : {2, 3, 5, 7})
  650. for (size_t p : {0, 1, 2})
  651. for (size_t ic : {1, 3, 4, 8, 12, 16, 32, 48, 64})
  652. for (size_t oc : {1, 3, 4, 8, 12, 16, 32, 48, 64})
  653. for (size_t size : {56, 128, 256}) {
  654. run(oc, ic, size, size, kernel, p);
  655. }
  656. constexpr size_t RUN = 50;
  657. Benchmarker<Convolution> benchmark_conv(handle());
  658. benchmark_conv.set_dtype(0, dtype::Int8())
  659. .set_dtype(1, dtype::Int8())
  660. .set_dtype(2, dtype::Int32());
  661. benchmark_conv.set_display(false);
  662. benchmark_conv.set_times(RUN);
  663. benchmark_conv.set_before_exec_callback(
  664. AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_S8STRD2"));
  665. Benchmarker<Convolution> benchmark_matmul(handle());
  666. benchmark_matmul.set_dtype(0, dtype::Int8())
  667. .set_dtype(1, dtype::Int8())
  668. .set_dtype(2, dtype::Int32());
  669. benchmark_matmul.set_display(false);
  670. benchmark_matmul.set_times(RUN);
  671. for (auto&& arg : args) {
  672. TensorLayout dst_layout;
  673. auto opr = handle()->create_operator<Convolution>();
  674. opr->param() = arg.param;
  675. opr->deduce_layout(
  676. {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
  677. dst_layout);
  678. //! dst.nr_elems * IC * FH * FW * 2
  679. float computations = dst_layout.total_nr_elems() * arg.filter[1] *
  680. arg.filter[2] * arg.filter[3] * 2.0 /
  681. (1024 * 1024 * 1024) * 1e3;
  682. auto used_conv =
  683. benchmark_conv.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  684. RUN;
  685. auto used_matmul =
  686. benchmark_matmul.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
  687. RUN;
  688. printf("%s %s: conv: %f ms %f Gflops matmul: %f ms %f GFlops speedup: "
  689. "%f\n",
  690. arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_conv,
  691. computations / used_conv, used_matmul, computations / used_matmul,
  692. used_matmul / used_conv);
  693. }
  694. }
TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_QUINT8X8X32_STRD1_WITHOUT_DOTPROD) {
    // Benchmark the quint8 stride-1 direct conv against the float32 stride-1
    // direct conv on the same shapes and print per-case speedup.
    // have to remove preferred restrict in usable func before run the benchmark
    using namespace convolution;
    std::vector<TestArg> args;
    // collect one stride-1 test case; skipped when the padded input is
    // smaller than the kernel
    auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
        if (w + 2 * p < kernel || h + 2 * p < kernel)
            return;
        param::Convolution param;
        param.stride_h = 1;
        param.stride_w = 1;
        param.pad_h = p;
        param.pad_w = p;
        args.emplace_back(
                param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
    };
    // compare to float direct conv here,
    // but float direct conv don't support 7x7.
    for (size_t kernel : {2, 3, 5})
        for (size_t ic : {1, 8, 16, 32, 64})
            for (size_t oc : {1, 8, 16, 32, 64})
                for (size_t p : {0, 1, 2, 3}) {
                    run(oc, ic, 56, 56, kernel, p);
                    run(oc, ic, 128, 128, kernel, p);
                    run(oc, ic, 256, 256, kernel, p);
                }
    constexpr size_t RUN = 50;
    // quantized benchmark, pinned to the QU8STRD1 direct algorithm
    Benchmarker<Convolution> benchmark(handle());
    benchmark.set_dtype(0, dtype::Quantized8Asymm(0.1f, static_cast<uint8_t>(120)))
            .set_dtype(1, dtype::Quantized8Asymm(0.1f, static_cast<uint8_t>(120)))
            .set_dtype(2, dtype::QuantizedS32(0.01f));
    benchmark.set_display(false);
    benchmark.set_times(RUN);
    benchmark.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_QU8STRD1"));
    // float baseline, pinned to the F32STRD1 direct algorithm
    Benchmarker<Convolution> benchmark_float(handle());
    benchmark_float.set_display(false);
    benchmark_float.set_times(RUN);
    benchmark_float.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_F32STRD1"));
    for (auto&& arg : args) {
        // deduce the output layout to compute the theoretical FLOP count
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Convolution>();
        opr->param() = arg.param;
        opr->deduce_layout(
                {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
                dst_layout);
        //! dst.nr_elems * IC * FH * FW * 2
        float computations = dst_layout.total_nr_elems() * arg.filter[1] *
                             arg.filter[2] * arg.filter[3] * 2.0 /
                             (1024 * 1024 * 1024) * 1e3;
        // average per-iteration times, in ms
        auto used_int =
                benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
        auto used_float =
                benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
                RUN;
        printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
               "%f\n",
               arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
               computations / used_int, used_float, computations / used_float,
               used_float / used_int);
    }
}
TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_QUINT8X8X32_STRD2_WITHOUT_DOTPROD) {
    // Benchmark the quint8 stride-2 direct conv against the float32 stride-2
    // direct conv on the same shapes and print per-case speedup.
    // have to remove preferred restrict in usable func before run the benchmark
    using namespace convolution;
    std::vector<TestArg> args;
    // collect one stride-2 test case; skipped when the padded input is
    // smaller than the kernel
    auto run = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel, size_t p) {
        if (w + 2 * p < kernel || h + 2 * p < kernel)
            return;
        param::Convolution param;
        param.stride_h = 2;
        param.stride_w = 2;
        param.pad_h = p;
        param.pad_w = p;
        args.emplace_back(
                param, TensorShape{1, ic, h, w}, TensorShape{oc, ic, kernel, kernel});
    };
    for (size_t kernel : {2, 3, 5, 7})
        for (size_t ic : {1, 8, 16, 32, 64})
            for (size_t oc : {1, 8, 16, 32, 64})
                for (size_t p : {0, 1, 2, 3}) {
                    run(oc, ic, 56, 56, kernel, p);
                    run(oc, ic, 128, 128, kernel, p);
                    run(oc, ic, 256, 256, kernel, p);
                }
    constexpr size_t RUN = 50;
    // quantized benchmark, pinned to the QU8STRD2 direct algorithm
    Benchmarker<Convolution> benchmark(handle());
    benchmark.set_dtype(0, dtype::Quantized8Asymm(0.1f, static_cast<uint8_t>(120)))
            .set_dtype(1, dtype::Quantized8Asymm(0.1f, static_cast<uint8_t>(120)))
            .set_dtype(2, dtype::QuantizedS32(0.01f));
    benchmark.set_display(false);
    benchmark.set_times(RUN);
    benchmark.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_QU8STRD2"));
    // float baseline; the stride-2 float algorithm name differs per target
    Benchmarker<Convolution> benchmark_float(handle());
    benchmark_float.set_display(false);
    benchmark_float.set_times(RUN);
#if MEGDNN_AARCH64
    benchmark_float.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_ARMV8F32STRD2"));
#else
    benchmark_float.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>("CONVOLUTION_DEFAULT_F32STRD2"));
#endif
    for (auto&& arg : args) {
        // deduce the output layout to compute the theoretical FLOP count
        TensorLayout dst_layout;
        auto opr = handle()->create_operator<Convolution>();
        opr->param() = arg.param;
        opr->deduce_layout(
                {arg.src, dtype::Float32()}, {arg.filter, dtype::Float32()},
                dst_layout);
        //! dst.nr_elems * IC * FH * FW * 2
        float computations = dst_layout.total_nr_elems() * arg.filter[1] *
                             arg.filter[2] * arg.filter[3] * 2.0 /
                             (1024 * 1024 * 1024) * 1e3;
        // average per-iteration times, in ms
        auto used_int =
                benchmark.set_param(arg.param).exec({arg.src, arg.filter, {}}) / RUN;
        auto used_float =
                benchmark_float.set_param(arg.param).exec({arg.src, arg.filter, {}}) /
                RUN;
        printf("%s %s: int: %f ms %f Gflops float: %f ms %f GFlops speedup: "
               "%f\n",
               arg.src.to_string().c_str(), arg.filter.to_string().c_str(), used_int,
               computations / used_int, used_float, computations / used_float,
               used_float / used_int);
    }
}
  822. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8_INT8_INT16) {
  823. using Param = param::Convolution;
  824. auto run = [&](const TensorShapeArray& shapes, Param param) {
  825. TensorLayoutArray layouts;
  826. layouts.emplace_back(shapes[0], dtype::Int8());
  827. layouts.emplace_back(shapes[1], dtype::Int8());
  828. layouts.emplace_back(shapes[2], dtype::Int16());
  829. Benchmarker<Convolution> benchmarker_cpu(handle()), benchmarker_float(handle());
  830. benchmarker_cpu.set_dtype(0, dtype::Int8());
  831. benchmarker_cpu.set_dtype(1, dtype::Int8());
  832. benchmarker_cpu.set_dtype(2, dtype::Int16());
  833. auto iparam = param;
  834. size_t RUN = 10;
  835. auto t2 = benchmarker_cpu.set_display(false)
  836. .set_times(RUN)
  837. .set_param(iparam)
  838. .execl(layouts);
  839. auto t4 = benchmarker_float.set_display(false)
  840. .set_times(RUN)
  841. .set_param(param)
  842. .exec(shapes);
  843. auto speedup = t4 / t2;
  844. std::cout << "src=" << shapes[0].to_string()
  845. << " filter=" << shapes[1].to_string() << " stride=" << param.stride_h
  846. << " float=" << t4 << "ms"
  847. << " int=" << t2 << "ms"
  848. << " speedup=" << speedup << std::endl;
  849. ASSERT_GE(speedup, 1);
  850. };
  851. /*
  852. for (size_t s: {1, 2})
  853. for (size_t k: {3})
  854. for (size_t c: {16})
  855. for (size_t h = 20; h <= 60; ++h)
  856. {
  857. Param param;
  858. param.stride_h = param.stride_w = s;
  859. run({{1, c, h, h}, {c, c, k, k}, {}}, param);
  860. }
  861. for (size_t s: {1})
  862. for (size_t k: {1})
  863. for (size_t c: {16})
  864. for (size_t h = 16; h <= 1024; h*=2)
  865. {
  866. Param param;
  867. param.stride_h = param.stride_w = s;
  868. run({{1, c, h, h}, {c, c, k, k}, {}}, param);
  869. }
  870. */
  871. for (size_t s : {1}) {
  872. Param param;
  873. param.stride_h = param.stride_w = s;
  874. run({{2, 3, 480, 270}, {12, 3, 1, 1}, {}}, param);
  875. run({{2, 12, 240, 135}, {48, 12, 1, 1}, {}}, param);
  876. run({{2, 16, 240, 135}, {4, 16, 1, 1}, {}}, param);
  877. run({{2, 4, 240, 135}, {16, 4, 1, 1}, {}}, param);
  878. run({{2, 16, 240, 135}, {8, 16, 1, 1}, {}}, param);
  879. run({{2, 8, 120, 68}, {32, 8, 1, 1}, {}}, param);
  880. run({{2, 32, 120, 68}, {8, 32, 1, 1}, {}}, param);
  881. run({{2, 64, 60, 34}, {16, 64, 1, 1}, {}}, param);
  882. }
  883. }
  884. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_INT8_INT8_INT32) {
  885. using Param = param::Convolution;
  886. auto run = [&](const TensorShapeArray& shapes, Param param) {
  887. TensorLayoutArray layouts;
  888. layouts.emplace_back(shapes[0], dtype::Int8());
  889. layouts.emplace_back(shapes[1], dtype::Int8());
  890. layouts.emplace_back(shapes[2], dtype::Int32());
  891. Benchmarker<Convolution> benchmarker_cpu(handle()), benchmarker_float(handle());
  892. benchmarker_cpu.set_dtype(0, dtype::Int8());
  893. benchmarker_cpu.set_dtype(1, dtype::Int8());
  894. benchmarker_cpu.set_dtype(2, dtype::Int32());
  895. auto iparam = param;
  896. size_t RUN = 10;
  897. auto t2 = benchmarker_cpu.set_display(false)
  898. .set_times(RUN)
  899. .set_param(iparam)
  900. .execl(layouts);
  901. auto t4 = benchmarker_float.set_display(false)
  902. .set_times(RUN)
  903. .set_param(param)
  904. .exec(shapes);
  905. auto speedup = t4 / t2;
  906. std::cout << "src=" << shapes[0].to_string()
  907. << " filter=" << shapes[1].to_string() << " stride=" << param.stride_h
  908. << " float=" << t4 << "ms"
  909. << " int=" << t2 << "ms"
  910. << " speedup=" << speedup << std::endl;
  911. ASSERT_GE(speedup, 1);
  912. };
  913. for (size_t s : {1, 2})
  914. for (size_t k : {3})
  915. for (size_t c : {16})
  916. for (size_t h = 20; h <= 60; ++h) {
  917. Param param;
  918. param.stride_h = param.stride_w = s;
  919. run({{1, c, h, h}, {c, c, k, k}, {}}, param);
  920. }
  921. for (size_t s : {1})
  922. for (size_t k : {1})
  923. for (size_t c : {16})
  924. for (size_t h = 16; h <= 1024; h *= 2) {
  925. Param param;
  926. param.stride_h = param.stride_w = s;
  927. run({{1, c, h, h}, {c, c, k, k}, {}}, param);
  928. }
  929. for (size_t s : {1}) {
  930. Param param;
  931. param.stride_h = param.stride_w = s;
  932. run({{2, 3, 480, 270}, {12, 3, 1, 1}, {}}, param);
  933. run({{2, 12, 240, 135}, {48, 12, 1, 1}, {}}, param);
  934. run({{2, 16, 240, 135}, {4, 16, 1, 1}, {}}, param);
  935. run({{2, 4, 240, 135}, {16, 4, 1, 1}, {}}, param);
  936. run({{2, 16, 240, 135}, {8, 16, 1, 1}, {}}, param);
  937. run({{2, 8, 120, 68}, {32, 8, 1, 1}, {}}, param);
  938. run({{2, 32, 120, 68}, {8, 32, 1, 1}, {}}, param);
  939. run({{2, 64, 60, 34}, {16, 64, 1, 1}, {}}, param);
  940. }
  941. }
  942. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_DIRECT) {
  943. using Param = param::Convolution;
  944. Benchmarker<Convolution> benchmarker_float(handle());
  945. Benchmarker<Convolution> benchmarker_half(handle());
  946. const size_t RUNS = 10;
  947. benchmarker_float.set_display(false)
  948. .set_times(RUNS)
  949. .set_dtype(0, dtype::Float32{})
  950. .set_dtype(1, dtype::Float32{})
  951. .set_dtype(2, dtype::Float32{})
  952. .set_before_exec_callback(
  953. AlgoChecker<Convolution>("CONVOLUTION_DEFAULT_F32DIRECT"));
  954. benchmarker_half.set_display(false)
  955. .set_times(RUNS)
  956. .set_dtype(0, dtype::Float16{})
  957. .set_dtype(1, dtype::Float16{})
  958. .set_dtype(2, dtype::Float16{})
  959. .set_before_exec_callback(
  960. AlgoChecker<Convolution>("CONVOLUTION_DEFAULT_F16DIRECT"));
  961. auto run = [&](const TensorShapeArray& shapes, Param param) {
  962. auto tfloat = benchmarker_float.set_param(param).exec(shapes) / RUNS;
  963. auto thalf = benchmarker_half.set_param(param).exec(shapes) / RUNS;
  964. TensorLayout dst_layout;
  965. auto opr = handle()->create_operator<Convolution>();
  966. opr->param() = param;
  967. opr->deduce_layout(
  968. {shapes[0], dtype::Float32()}, {shapes[1], dtype::Float32()},
  969. dst_layout);
  970. //! dst.nr_elems * IC * FH * FW * 2
  971. float computations = dst_layout.total_nr_elems() * shapes[1][1] * shapes[1][2] *
  972. shapes[1][3] * 2.0 / (1024 * 1024 * 1024);
  973. printf("run:%s %s float: %f ms %f Gflops VS half: %f ms %f Gflops "
  974. "speepup: %f\n",
  975. shapes[0].to_string().c_str(), shapes[1].to_string().c_str(), tfloat,
  976. computations / tfloat * 1e3, thalf, computations / thalf * 1e3,
  977. tfloat / thalf);
  978. };
  979. auto profile = [&](size_t n, size_t oc, size_t ic, size_t w, size_t h,
  980. size_t kernel, size_t stride) {
  981. Param param;
  982. param.stride_h = stride;
  983. param.stride_w = stride;
  984. param.pad_h = kernel / 2;
  985. param.pad_w = kernel / 2;
  986. run({{n, ic, h, w}, {oc, ic, kernel, kernel}, {}}, param);
  987. };
  988. for (size_t kernel : {1, 2, 3, 4, 5, 6, 7}) {
  989. for (size_t ic : {12}) {
  990. for (size_t oc : {4}) {
  991. for (size_t size : {17, 28, 32, 34, 64, 112, 256}) {
  992. profile(1, oc, ic, size, size, kernel, 1);
  993. }
  994. }
  995. }
  996. }
  997. for (auto k : {1, 2, 3, 4, 5, 6, 7}) {
  998. profile(2, 12, 3, 480, 270, k, 1);
  999. profile(2, 48, 12, 240, 135, k, 1);
  1000. profile(2, 4, 16, 240, 135, k, 1);
  1001. profile(2, 16, 4, 240, 135, k, 1);
  1002. profile(2, 8, 16, 240, 135, k, 1);
  1003. profile(2, 32, 8, 240, 135, k, 1);
  1004. profile(2, 8, 32, 120, 68, k, 1);
  1005. profile(2, 16, 64, 60, 34, k, 1);
  1006. }
  1007. }
  1008. TEST_F(ARM_COMMON, BENCHMARK_CONVOLUTION_STRIDE1) {
  1009. using Param = param::Convolution;
  1010. auto run_fp32 = [&](const TensorShapeArray& shapes, Param param) {
  1011. Benchmarker<Convolution> benchmarker_float(handle());
  1012. size_t RUN = 50;
  1013. auto tfloat = benchmarker_float.set_display(false)
  1014. .set_dtype(0, dtype::Float32())
  1015. .set_dtype(1, dtype::Float32())
  1016. .set_dtype(2, dtype::Float32())
  1017. .set_before_exec_callback(AlgoChecker<Convolution>(
  1018. "CONVOLUTION_DEFAULT_F32STRD1"))
  1019. .set_times(RUN)
  1020. .set_param(param)
  1021. .exec(shapes);
  1022. size_t IC = shapes[1][1];
  1023. size_t FH = shapes[1][2];
  1024. size_t FW = shapes[1][3];
  1025. TensorLayout dst_layout;
  1026. auto opr = handle()->create_operator<Convolution>();
  1027. opr->param() = param;
  1028. opr->deduce_layout(
  1029. {shapes[0], dtype::Float32()}, {shapes[1], dtype::Float32()},
  1030. dst_layout);
  1031. printf("fp32 flops: %.3f mflops\n",
  1032. (IC * dst_layout.total_nr_elems() * FH * FW * 2) /
  1033. (tfloat / RUN * 1000));
  1034. };
  1035. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  1036. auto run_fp16 = [&](const TensorShapeArray& shapes, Param param) {
  1037. Benchmarker<Convolution> benchmarker_float(handle());
  1038. size_t RUN = 50;
  1039. auto tfloat = benchmarker_float.set_display(false)
  1040. .set_dtype(0, dtype::Float16())
  1041. .set_dtype(1, dtype::Float16())
  1042. .set_dtype(2, dtype::Float16())
  1043. .set_before_exec_callback(AlgoChecker<Convolution>(
  1044. "CONVOLUTION_DEFAULT_F16STRD1"))
  1045. .set_times(RUN)
  1046. .set_param(param)
  1047. .exec(shapes);
  1048. size_t IC = shapes[1][1];
  1049. size_t FH = shapes[1][2];
  1050. size_t FW = shapes[1][3];
  1051. TensorLayout dst_layout;
  1052. auto opr = handle()->create_operator<Convolution>();
  1053. opr->param() = param;
  1054. opr->deduce_layout(
  1055. {shapes[0], dtype::Float16()}, {shapes[1], dtype::Float16()},
  1056. dst_layout);
  1057. printf("fp16 flops: %.3f mflops\n",
  1058. (IC * dst_layout.total_nr_elems() * FH * FW * 2) /
  1059. (tfloat / RUN * 1000));
  1060. };
  1061. #endif
  1062. auto profile = [&](size_t oc, size_t ic, size_t w, size_t h, size_t kernel,
  1063. size_t stride) {
  1064. Param param;
  1065. param.stride_h = stride;
  1066. param.stride_w = stride;
  1067. param.pad_h = kernel / 2;
  1068. param.pad_w = kernel / 2;
  1069. printf("oc: %zd ic: %zd w: %zd h: %zd stride: %zd kernel_size: %zd\n", oc, ic,
  1070. w, h, stride, kernel);
  1071. run_fp32({{1, ic, h, w}, {oc, ic, kernel, kernel}, {}}, param);
  1072. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  1073. run_fp16({{1, ic, h, w}, {oc, ic, kernel, kernel}, {}}, param);
  1074. #endif
  1075. };
  1076. for (size_t kernel : {2, 3, 5}) {
  1077. for (size_t ic : {3, 6, 12, 24}) {
  1078. for (size_t oc : {3, 6, 12, 24}) {
  1079. for (size_t size : {4, 7, 8, 14, 16, 17, 28, 32, 34, 64, 112}) {
  1080. profile(oc, ic, size, size, kernel, 1);
  1081. }
  1082. }
  1083. }
  1084. }
  1085. }
  1086. #endif
  1087. // vim: syntax=cpp.doxygen