
convolution.cpp

/**
 * \file dnn/test/cuda/convolution.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "megdnn/opr_param_defs.h"
#include "test/cuda/fixture.h"
#include "test/common/tensor.h"
#include "test/common/workspace_wrapper.h"
#include "test/common/checker.h"
#include "test/common/convolution.h"
#include "test/common/rng.h"
#include "test/cuda/benchmark.h"
#include "src/cuda/utils.h"

#define V1(x) #x
#define V(x) V1(x)
#define CUDNN_VERSION_STRING \
    "v" V(CUDNN_MAJOR) "." V(CUDNN_MINOR) "." V(CUDNN_PATCHLEVEL)
namespace megdnn {
namespace test {
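// Int8 convolutions in NHWC layout are served by dp4a-based kernels, which
// require compute capability 6.1 (sm_61) or newer; the test below skips
// itself on older devices.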
TEST_F(CUDA, CONVOLUTION_8X8X32) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_8X8X32 test as current device "
               "doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args;
    {
        auto v = get_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    {
        auto v = get_dilated_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    {
        auto v = get_chanwise_args();
        for (auto&& a : v) {
            args.push_back(std::move(a));
        }
    }
    Checker<ConvolutionForward> checker(handle_cuda());
    UniformIntRNG rng(-4, 4);
    for (auto arg : args) {
        arg.param.format = param::Convolution::Format::NHWC;
        arg.src = cvt_src_or_dst_nchw2nhwc(arg.src);
        arg.filter = cvt_filter_nchw2nhwc(arg.filter);
        checker.set_dtype(0, dtype::Int8())
                .set_dtype(1, dtype::Int8())
                .set_dtype(2, dtype::Int32())
                .set_param(arg.param)
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .execs({arg.src, arg.filter, {}});
    }
}
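// Forward convolution across dtypes. ComputeMode::FLOAT32 keeps the tensors
// in half precision but accumulates in fp32 ("pseudo fp16"), which is why the
// half-precision checks below use the looser 1e-1 epsilon while fp32 uses
// 1e-3.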
TEST_F(CUDA, CONVOLUTION_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionForward> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        checker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        checker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16())
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16())
                .set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
        checker.set_dtype(0, dtype::BFloat16())
                .set_dtype(1, dtype::BFloat16())
                .set_dtype(2, dtype::BFloat16())
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
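// NCHW4 packs channels in groups of four: a logical (N, C, H, W) tensor is
// laid out as (N, C/4, H, W, 4). The shapes passed to exec() below follow
// that convention for both src and filter.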
TEST_F(CUDA, CONV_FORWARD_MATMUL_NCHW4) {
    if (!cuda::is_compute_capability_required(6, 1))
        return;
    using namespace convolution;
    Checker<Convolution> checker(handle_cuda());
    UniformIntRNG int_rng{-127, 127};
    Convolution::Param param;
    param.format = Convolution::Param::Format::NCHW4;
    checker.set_dtype(0, dtype::QuantizedS8(0.132f))
            .set_dtype(1, dtype::QuantizedS8(0.0239f))
            .set_dtype(2, dtype::QuantizedS32(0.132f * 0.0239f))
            .set_rng(0, &int_rng)
            .set_rng(1, &int_rng)
            .set_param(param);
    checker.set_before_exec_callback(
            AlgoChecker<ConvolutionForward>(ExecutionPolicyAlgoName{
                    "DEFAULT",
                    {{ConvBiasForward::algo_name<ConvBiasForward::MatmulParam>(
                              "MATMUL8X8X32", {})
                              .c_str(),
                      {}}}}));
    param.sparse = Convolution::Param::Sparse::DENSE;
    param.pad_h = param.pad_w = 1;
    param.stride_h = param.stride_w = 1;
    checker.set_param(param);
    checker.exec({{8, 4, 10, 10, 4}, {16, 4, 3, 3, 4}, {}});
    checker.exec({{1, 4, 2, 2, 4}, {16, 4, 3, 3, 4}, {}});
    checker.exec({{8, 64, 12, 12, 4}, {256, 64, 3, 3, 4}, {}});
}
TEST_F(CUDA, CONVOLUTION_1X1_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_1x1_args();
    Checker<ConvolutionForward> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        checker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
TEST_F(CUDA, BENCHMARK_CONVOLUTION_1X1_FORWARD) {
    using namespace convolution;
    std::vector<TestArg> args = get_1x1_args();
    Benchmarker<ConvolutionForward> marker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                1.0f / sqrt(arg.filter[1] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        marker.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_param(arg.param)
                .execs({arg.src, arg.filter, {}});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA) {
    using namespace convolution;
    std::vector<TestArg> args = get_args_cuda_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                64.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        if (!cuda::is_compute_capability_required(6, 0)) {
            src.dtype = dst.dtype = filter.dtype = dtype::Float16();
            checker.set_rng(0, &rng)
                    .set_rng(1, &rng)
                    .set_epsilon(1e-1)
                    .set_param(arg.param)
                    .exec(TensorLayoutArray{filter, dst, src});
            arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
            checker.set_rng(0, &rng)
                    .set_rng(1, &rng)
                    .set_epsilon(1e-1)
                    .set_param(arg.param)
                    .exec(TensorLayoutArray{filter, dst, src});
        }
        checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
                ExecutionPolicyAlgoName{"CONVOLUTION_BACKWARD_DATD_BFLOAT16",
                                        {{"MATMUL", {{"CUBLAS", {}}}}}}));
        src.dtype = dst.dtype = filter.dtype = dtype::BFloat16();
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        checker.reset_before_exec_callback();
        checker.opr()->execution_policy() = {};
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_MATMUL) {
    using namespace convolution;
    std::vector<TestArg> args = get_args_cuda_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            ExecutionPolicyAlgoName{"MATMUL", {{"CUBLAS", {}}}}));
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                64.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW4_DP4A) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_BACKWARD_DATA_INT8_NCHW4_DP4A test as "
               "current device doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args = get_args_int8_nchw4_conv_bwd_data();
    struct AlgoParam {
        int threadblock_m;
        int threadblock_n;
        int threadblock_k;
        int warp_m;
        int warp_n;
        int warp_k;
        int stage;
        std::string to_string() {
            return ssprintf("_%dX%dX%d_%dX%dX%d_%dstage", threadblock_m,
                            threadblock_n, threadblock_k, warp_m, warp_n,
                            warp_k, stage);
        }
    };
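    // to_string() appends the tile configuration to the base algorithm name,
    // yielding names such as
    // "INT8_NCHW4_DOTPROD_IMPLICIT_GEMM_16X64X8_16X64X8_2stage".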
    std::vector<AlgoParam> all_params;
    all_params.emplace_back(AlgoParam{16, 64, 8, 16, 64, 8, 2});
    all_params.emplace_back(AlgoParam{16, 128, 16, 16, 64, 16, 2});
    all_params.emplace_back(AlgoParam{16, 128, 16, 16, 128, 16, 1});
    all_params.emplace_back(AlgoParam{32, 128, 32, 32, 64, 32, 2});
    all_params.emplace_back(AlgoParam{64, 128, 32, 64, 32, 32, 2});
    for (auto algo_param : all_params) {
        Checker<ConvolutionBackwardData> checker(handle_cuda());
        std::string algo_name(ssprintf("INT8_NCHW4_DOTPROD_IMPLICIT_GEMM%s",
                                       algo_param.to_string().c_str()));
        checker.set_before_exec_callback(
                AlgoChecker<ConvolutionBackwardData>(algo_name.c_str()));
        checker.set_epsilon(1 + 1e-3).set_max_avg_error(1e-1);
        for (auto&& arg : args) {
            UniformIntRNG rng(-3, 3);
            auto src = TensorLayout(arg.src, dtype::QuantizedS8{1.2f});
            auto filter = TensorLayout(arg.filter, dtype::QuantizedS8{1.3f});
            TensorLayout dst;
            dst.dtype = dtype::QuantizedS8{1.2f};
            {
                auto opr = handle_cuda()->create_operator<Convolution>();
                opr->param() = arg.param;
                opr->deduce_layout(src, filter, dst);
            }
            checker.set_rng(0, &rng).set_rng(1, &rng).set_param(arg.param).exec(
                    TensorLayoutArray{filter, dst, src});
        }
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_INT8_NCHW_DP4A) {
    if (!cuda::is_compute_capability_required(6, 1)) {
        printf("Skip CUDA.CONVOLUTION_BACKWARD_DATA_INT8_NCHW_DP4A test as "
               "current device doesn't support\n");
        return;
    }

    using namespace convolution;
    std::vector<TestArg> args = get_args_int8_nchw_conv_bwd_data();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            "INT8_NCHW_DOTPROD_IMPLICIT_GEMM"));
    checker.set_epsilon(1 + 1e-3).set_max_avg_error(1e-1);
    for (auto&& arg : args) {
        UniformIntRNG rng(-3, 3);
        auto src = TensorLayout(arg.src, dtype::QuantizedS8{1.2f});
        auto filter = TensorLayout(arg.filter, dtype::QuantizedS8{1.3f});
        TensorLayout dst;
        dst.dtype = dtype::QuantizedS8{1.2f};
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        checker.set_rng(0, &rng).set_rng(1, &rng).set_param(arg.param).exec(
                TensorLayoutArray{filter, dst, src});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_FAILED_CUDNN7_5) {
    // BRAIN-481: this case fails on compute capability 7.0; remove the
    // following if statement once cuDNN fixes the problem.
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    std::vector<TestArg> args = get_args_cudnn_7_5_failures();
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    NormalRNG default_rng;
    for (auto&& arg : args) {
        float scale =
                128.f / sqrt(arg.filter[0] * arg.filter[2] * arg.filter[3]);
        scale = std::max(scale, 1.f);
        UniformFloatRNG rng(scale, 2 * scale);
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &default_rng)
                .set_rng(1, &default_rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        src.dtype = dst.dtype = filter.dtype = dtype::Float16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{filter, dst, src});
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    bool f16_checked = false;
    for (auto&& arg : args) {
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        float scale = 1.0f / sqrt(dst[2] * dst[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        // reduction over a large fp16 array may introduce significant error
        if (dst.total_nr_elems() >= 1000 && f16_checked)
            continue;
        f16_checked = true;
        src.dtype = dst.dtype = filter.dtype = dtype::Float16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardFilter>(
                ExecutionPolicyAlgoName{"CONVOLUTION_BACKWARD_FILTER_BFLOAT16",
                                        {{"MATMUL", {{"CUBLAS", {}}}}}}));
        src.dtype = dst.dtype = filter.dtype = dtype::BFloat16();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-1)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
        checker.reset_before_exec_callback();
        checker.opr()->execution_policy() = {};
    }
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_FILTER_MATMUL) {
    using namespace convolution;
    std::vector<TestArg> args = get_args();
    Checker<ConvolutionBackwardFilter> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardFilter>(
            ExecutionPolicyAlgoName{"MATMUL", {{"CUBLAS", {}}}}));
    for (auto&& arg : args) {
        auto src = TensorLayout(arg.src, dtype::Float32());
        auto filter = TensorLayout(arg.filter, dtype::Float32());
        TensorLayout dst;
        {
            auto opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = arg.param;
            opr->deduce_layout(src, filter, dst);
        }
        float scale = 1.0f / sqrt(dst[2] * dst[3]);
        UniformFloatRNG rng(scale, 2 * scale);
        src.dtype = dst.dtype = filter.dtype = dtype::Float32();
        checker.set_rng(0, &rng)
                .set_rng(1, &rng)
                .set_epsilon(1e-3)
                .set_param(arg.param)
                .exec(TensorLayoutArray{src, dst, filter});
    }
}
TEST_F(CUDA, CONV_CONFIG_COMBINATIONS) {
    auto eps_getter = [](bool f16, int stage, const char* name) -> float {
        if (f16) {
            return stage == 2 ? 0.5 : 0.2;
        }
        if (strstr(name, "WINOGRAD_NONFUSED"))
            return 0.3;
        return 1e-3;
    };
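    // Winograd non-fused kernels accumulate larger numerical error than
    // direct methods, hence the looser 0.3 tolerance above versus the 1e-3
    // fp32 default.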
    convolution::test_conv_config_combinations(2, handle_cuda(), false, true,
                                               true, eps_getter, true);
    convolution::test_conv_config_combinations(3, handle_cuda(), false, true,
                                               true, eps_getter, true);
    convolution::test_conv_config_combinations(5, handle_cuda(), false, true,
                                               true, eps_getter, true);
}
TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA_1) {
    if (cuda::is_compute_capability_required(7, 0))
        return;
    using namespace convolution;
    Checker<ConvolutionBackwardData> checker(handle_cuda());
    checker.set_before_exec_callback(AlgoChecker<ConvolutionBackwardData>(
            "CUDNN_CONVOLUTION_BWD_DATA_ALGO_1" CUDNN_VERSION_STRING));
    NormalRNG default_rng;
    TensorShape s_filter = TensorShape{8, 8, 2, 2},
                s_src = TensorShape{2, 8, 18, 18};
    float scale = 1.0f / sqrt(s_filter[0] * s_filter[2] * s_filter[3]);
    UniformFloatRNG rng(scale, 2 * scale);
    auto src = TensorLayout(s_src, dtype::Float16());
    auto filter = TensorLayout(s_filter, dtype::Float16());
    TensorLayout dst;
    param::Convolution param;
    param.pad_h = param.pad_w = 2;
    param.stride_h = param.stride_w = 2;
    {
        auto opr = handle_cuda()->create_operator<Convolution>();
        opr->param() = param;
        opr->deduce_layout(src, filter, dst);
    }
    src.dtype = dst.dtype = filter.dtype = dtype::Float16();
    param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
    checker.set_rng(0, &rng)
            .set_rng(1, &rng)
            .set_epsilon(0.2)
            .set_param(param)
            .exec(TensorLayoutArray{filter, dst, src});
}
#if MEGDNN_WITH_BENCHMARK
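// The benchmarks below are compiled only when MEGDNN_WITH_BENCHMARK is
// enabled; they report throughput rather than asserting correctness. A dense
// convolution costs IC * FH * FW multiply-accumulates per output element, and
// counting each MAC as two floating-point operations gives
// FLO = 2 * N * OC * OH * OW * IC * FH * FW, the formula used throughout.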
TEST_F(CUDA, CONV_FWD_BENCHMARK) {
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t SH = 1, size_t SW = 1, size_t FH = 1, size_t FW = 1,
                   size_t PH = 0, size_t PW = 0, bool fp16io_c32 = false) {
        auto benchmarker = Benchmarker<ConvolutionForward>(handle_cuda());
        benchmarker.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        ConvolutionForward::Param param;
        param.stride_h = SH;
        param.stride_w = SW;
        param.pad_h = PH;
        param.pad_w = PW;
        if (fp16io_c32) {
            param.compute_mode =
                    ConvolutionForward::Param::ComputeMode::FLOAT32;
        }
        benchmarker.set_param(param);
        std::unique_ptr<OprProxy<ConvolutionForward>> proxy{
                new OprProxy<ConvolutionForward>{true}};
        benchmarker.set_proxy(proxy);
        size_t OH = (IH - FH + 2 * PH) / SH + 1;
        size_t OW = (IW - FW + 2 * PW) / SW + 1;
        auto time = benchmarker.execs(
                {{N, IC, IH, IW}, {OC, IC, FH, FW}, {N, OC, OH, OW}});
        time /= 1000.0 * 10.0;
        auto flo = (double)N * OC * IC * OH * OW * FH * FW * 2;
        auto flops = flo / time / 1e12;
        printf("comp_type %s: ", fp16io_c32 ? "32" : "16");
        printf("%.3fG FLO, flops %.3fTFLOPS\n", flo / 1e9, flops);
    };
    run(32, 512, 256, 56, 56, 1, 1, 1, 1, 0, 0, false);
    run(32, 512, 256, 56, 56, 1, 1, 1, 1, 0, 0, true);
}
TEST_F(CUDA, CONVOLUTION_FWD_BENCHMARK) {
    CUBenchmarker<ConvolutionForward> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionForward>> proxy{
            new OprProxy<ConvolutionForward>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({src, filter, dst}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({src, filter, dst}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({src, filter, dst}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
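// ConvolutionBackwardData computes the input gradient from (filter, diff,
// grad), so the layout triples below are passed as {filter, dst, src} rather
// than the forward-order {src, filter, dst}.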
TEST_F(CUDA, CONVOLUTION_BWD_DATA_BENCHMARK) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({filter, dst, src}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({filter, dst, src}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
TEST_F(CUDA, BENCHMARK_CONVOLUTION_BWD_DATA_BF16) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::BFloat16())
                .set_dtype(1, dtype::BFloat16())
                .set_dtype(2, dtype::BFloat16());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy = {};
        TensorLayout src{{N, IC, IH, IW}, dtype::BFloat16()},
                filter{{OC, IC, FH, FH}, dtype::BFloat16()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto used = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_bf16=%.2fms, flops=%.3fTFLOPS\n", used,
               (flo / (used * 1e9)));
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
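// For quantized operators the output dtype cannot be deduced from the inputs
// alone, so dst.dtype is set before deduce_layout() in the benchmark below;
// the 1.0f scales are dummies, since only timing matters here.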
TEST_F(CUDA, BENCHMARK_CONVOLUTION_BWD_DATA_INT8_DP4A) {
    CUBenchmarker<ConvolutionBackwardData> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardData>> proxy{
            new OprProxy<ConvolutionBackwardData>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::QuantizedS8{1.0f})
                .set_dtype(1, dtype::QuantizedS8{1.0f})
                .set_dtype(2, dtype::QuantizedS8{1.0f});
        param::Convolution param;
        param.format = param::Convolution::Format::NCHW4;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy = {};
        TensorLayout src{{N, IC / 4, IH, IW, 4}, dtype::QuantizedS8{1.0f}},
                filter{{OC, IC / 4, FH, FH, 4}, dtype::QuantizedS8{1.0f}};
        TensorLayout dst;
        dst.dtype = dtype::QuantizedS8{1.0f};
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto used = bench.execl({filter, dst, src}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_int8=%.2fms, flops=%.3fTFLOPS\n", used,
               (flo / (used * 1e9)));
    };
    run(64, 32, 32, 92, 180, 4, 2, 2);
    run(64, 32, 32, 46, 80, 4, 2, 2);
    run(16, 16, 16, 92, 180, 4, 2, 2);
    run(16, 16, 16, 46, 80, 4, 2, 2);
}
TEST_F(CUDA, CONVOLUTION_BWD_FILTER_BENCHMARK) {
    CUBenchmarker<ConvolutionBackwardFilter> bench{handle_cuda()};
    std::unique_ptr<OprProxy<ConvolutionBackwardFilter>> proxy{
            new OprProxy<ConvolutionBackwardFilter>{true}};
    size_t RUNS = 10;
    bench.set_proxy(proxy).set_times(RUNS);
    auto run = [&](size_t N, size_t OC, size_t IC, size_t IH, size_t IW,
                   size_t FH, size_t SH, size_t PH) {
        bench.set_dtype(0, dtype::Float32())
                .set_dtype(1, dtype::Float32())
                .set_dtype(2, dtype::Float32());
        param::Convolution param;
        param.stride_h = param.stride_w = SH;
        param.pad_h = param.pad_w = PH;
        param.compute_mode = param::Convolution::ComputeMode::DEFAULT;
        bench.set_param(param);
        bench.proxy()->target_execution_policy.algo.reset();
        TensorLayout src{{N, IC, IH, IW}, dtype::Float32()},
                filter{{OC, IC, FH, FH}, dtype::Float32()};
        TensorLayout dst;
        {
            auto&& opr = handle_cuda()->create_operator<Convolution>();
            opr->param() = param;
            opr->deduce_layout(src, filter, dst);
        }
        auto time_ms_fp32 = bench.execl({src, dst, filter}) / RUNS;
        src.dtype = filter.dtype = dst.dtype = dtype::Float16();
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_dtype(0, dtype::Float16())
                .set_dtype(1, dtype::Float16())
                .set_dtype(2, dtype::Float16());
        auto time_ms_true_fp16 = bench.execl({src, dst, filter}) / RUNS;
        param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
        bench.proxy()->target_execution_policy.algo.reset();
        bench.set_param(param);
        auto time_ms_pseudo_fp16 = bench.execl({src, dst, filter}) / RUNS;
        float flo = 2.0 * N * OC * IC * dst[2] * dst[3] * FH * FH;
        printf("inp=%s, kern=%s, dst=%s ", src.to_string().c_str(),
               filter.to_string().c_str(), dst.to_string().c_str());
        printf("time_fp32=%.2fms, flops=%.3fTFLOPS\ntime_true_fp16=%.2fms, "
               "flops=%.3fTFLOPS\ntime_pseudo_fp16=%.2fms, flops=%.3fTFLOPS\n",
               time_ms_fp32, (flo / (time_ms_fp32 * 1e9)), time_ms_true_fp16,
               (flo / (time_ms_true_fp16 * 1e9)), time_ms_pseudo_fp16,
               (flo / (time_ms_pseudo_fp16 * 1e9)));
        printf("speedup (true_fp16/fp32)=%.2f, (true_fp16/pseudo_fp16)=%.2f\n",
               time_ms_fp32 / time_ms_true_fp16,
               time_ms_pseudo_fp16 / time_ms_true_fp16);
    };
    run(32, 64, 3, 224, 224, 7, 2, 3);
    run(32, 128, 128, 28, 28, 3, 1, 1);
    run(32, 256, 256, 14, 14, 3, 1, 1);
    run(32, 512, 512, 7, 7, 3, 1, 1);
    run(32, 64, 64, 56, 56, 3, 1, 1);
    run(32, 512, 256, 56, 56, 1, 2, 0);
    run(32, 1024, 512, 28, 28, 1, 2, 0);
    run(32, 2048, 1024, 14, 14, 1, 2, 0);
    run(32, 512, 128, 28, 28, 1, 1, 0);
    run(32, 128, 512, 28, 28, 1, 1, 0);
    run(32, 1024, 256, 14, 14, 1, 1, 0);
    run(32, 256, 1024, 14, 14, 1, 1, 0);
    run(32, 2048, 512, 7, 7, 1, 1, 0);
    run(32, 512, 2048, 7, 7, 1, 1, 0);
    run(32, 256, 64, 56, 56, 1, 1, 0);
    run(32, 64, 256, 56, 56, 1, 1, 0);
    run(32, 128, 256, 56, 56, 1, 2, 0);
    run(32, 256, 512, 28, 28, 1, 2, 0);
    run(32, 512, 1024, 14, 14, 1, 2, 0);
    run(32, 64, 64, 56, 56, 1, 1, 0);
}
#endif

#undef CUDNN_VERSION_STRING
#undef V
#undef V1

} // namespace test
} // namespace megdnn

// vim: syntax=cpp.doxygen

The MegEngine package ships with the CUDA environment needed to run code on a GPU, so there are no separate CPU and GPU builds. To run GPU programs, make sure the machine has a GPU device and a properly installed driver. If you would like to try deep learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
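To verify at runtime that the installed package can actually see a GPU, a minimal sketch (assuming a standard pip-installed megengine; is_cuda_available and get_device_count are re-exported at the package top level):

    import megengine

    # True only if MegEngine can initialize CUDA on this machine
    # (GPU present and driver working).
    if megengine.is_cuda_available():
        print("CUDA devices:", megengine.get_device_count("gpu"))
    else:
        print("running on CPU only")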