
opr_footprint.cpp 35 kB

/**
 * \file src/plugin/impl/opr_footprint.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "megbrain/plugin/opr_footprint.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/dnn/convolution.h"
#include "megbrain/opr/dnn/images2neibs.h"
#include "megbrain/opr/dnn/local.h"
#include "megbrain/opr/dnn/lrn.h"
#include "megbrain/opr/dnn/pooling.h"
#include "megbrain/opr/dnn/adaptive_pooling.h"
#include "megbrain/opr/dnn/roi_pooling.h"
#include "megbrain/opr/dnn/roi_align.h"
#include "megbrain/opr/imgproc.h"
#include "megbrain/opr/standalone/nms_opr.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/opr/rand.h"
#include "megbrain/opr/dnn/batch_norm.h"
#include "megbrain/opr/misc.h"
#include "megbrain/opr/indexing.h"
#include "megbrain/opr/internal/indexing_helper.h"
#include "megbrain/opr/nn_int.h"
#include "megbrain/opr/tensor_gen.h"
#if MGB_ENABLE_JSON
#include "megdnn/opr_param_json.h"
#endif

#include "megbrain/utils/hash_ct.h"
#include "midout.h"

MIDOUT_DECL(megbrain_opr_footprint)

#define MIDOUT_B(...) \
    MIDOUT_BEGIN(megbrain_opr_footprint, __VA_ARGS__) {
#define MIDOUT_E \
    }            \
    MIDOUT_END();
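// Note (editorial): MIDOUT_B/MIDOUT_E bracket each registration with midout
// instrumentation so that, in trimmed builds, footprint registrations that
// were never exercised can be traced and compiled out.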
using namespace mgb;

namespace {

template <class T>
uint64_t opr_footprint_func(cg::OperatorNodeBase* opr);

// Elemwise
template <>
uint64_t opr_footprint_func<opr::Elemwise>(cg::OperatorNodeBase* opr) {
    return opr->output()[0]->shape().total_nr_elems() *
           (std::max<size_t>(opr->input().size(), 2) - 1);
}
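// e.g. (illustrative shapes) a two-input ADD over outputs of shape (4, 8)
// counts 32 * (2 - 1) = 32 operations, while a three-input FUSE_MUL_ADD3
// over the same shape counts 32 * (3 - 1) = 64.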
// AddUpdate
template <>
uint64_t opr_footprint_func<opr::AddUpdate>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "AddUpdate opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems() * 3;
}

template <class Conv>
uint64_t eval_conv_computation(const TensorShape& src_shape,
                               const TensorShape& filter_shape,
                               const TensorShape& dst_shape,
                               cg::OperatorNodeBase* opr) {
    using Param = opr::ConvolutionForward::Param;
    auto&& param = opr->cast_final_safe<Conv>().param();
    if (param.format == Param::Format::NHWCD4) {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            // channel-wise conv
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];
            if (filter_shape.ndim == 5) {
                group *= 4;
            }
        }
        return dst_shape.total_nr_elems() * fh * fw *
               src_shape[2] * 4 / group * 2;
    }
    auto eval_conv_computation_nchwx = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        bool hybird_nchwx = false;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            //! in hybrid nchwxx mode, src is nchw while output is nchwxx
            if (dst_shape.ndim == 5 && src_shape.ndim == 4) {
                fh = filter_shape[1];
                fw = filter_shape[2];
                hybird_nchwx = true;
            } else {
                fh = filter_shape[2];
                fw = filter_shape[3];
            }
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[3];
            fw = filter_shape[4];
            group = filter_shape[0];
        }
        if (param.format == Param::Format::NCHW88) {
            //! for channel-wise conv the weight layout is
            //! {group/8, FH, FW, 1, 1, 8}
            if (filter_shape[1] == 1 && filter_shape[2] == 1) {
                group *= 8;
            }
            size_t computation = dst_shape.total_nr_elems() * fh * fw *
                                 src_shape[1] / group * 2;
            return hybird_nchwx ? computation : computation * 8;
        }
        if (param.format == Param::Format::NCHW44 ||
            param.format == Param::Format::NCHW44_DOT) {
            //! for channel-wise conv the weight layout is
            //! {group/4, FH, FW, 1, 1, 4}
            if (filter_shape[1] == 1 && filter_shape[2] == 1) {
                group *= 4;
            }
            size_t computation = dst_shape.total_nr_elems() * fh * fw *
                                 src_shape[1] / group * 2;
            return hybird_nchwx ? computation : computation * 4;
        }
        if (param.format == Param::Format::NCHW32 ||
            param.format == Param::Format::NCHW32_NCHW4) {
            return dst_shape.total_nr_elems() * fh * fw * src_shape[1] * 32 /
                   group * 2;
        }
        mgb_assert(param.format == Param::Format::NCHW4 ||
                           param.format == Param::Format::NCHW4_NCHW ||
                           param.format == Param::Format::NCHW4_NCHW32,
                   "format should be NCHW4/NCHW4_NCHW/NCHW4_NCHW32");
        return dst_shape.total_nr_elems() * fh * fw * src_shape[1] * 4 / group *
               2;
    };
    auto eval_conv_computation_chwn4 = [&param, &src_shape, &filter_shape,
                                        &dst_shape]() -> uint64_t {
        size_t fh, fw;
        size_t group = 1;
        if (param.sparse == Param::Sparse::DENSE) {
            fh = filter_shape[1];
            fw = filter_shape[2];
            group = 1;
        } else {
            mgb_assert(param.sparse == Param::Sparse::GROUP);
            fh = filter_shape[2];
            fw = filter_shape[3];
            group = filter_shape[0];
        }
        return dst_shape.total_nr_elems() * fh * fw * src_shape[0] * 4 / group *
               2;
    };
    if (param.format == Param::Format::NCHW4 ||
        param.format == Param::Format::NCHW4_NCHW ||
        param.format == Param::Format::NCHW4_NCHW32 ||
        param.format == Param::Format::NCHW88 ||
        param.format == Param::Format::NCHW44 ||
        param.format == Param::Format::NCHW44_DOT ||
        param.format == Param::Format::NCHW32 ||
        param.format == Param::Format::NCHW32_NCHW4) {
        return eval_conv_computation_nchwx();
    }
    if (param.format == Param::Format::CHWN4) {
        return eval_conv_computation_chwn4();
    }
    size_t cpos;
    size_t spatial_start;
    size_t group = 1;
    switch (param.format) {
        case Param::Format::NCHW:
            cpos = 1;
            spatial_start = 2;
            break;
        case Param::Format::NHWC:
            cpos = 3;
            spatial_start = 1;
            break;
        default:
            mgb_assert(false, "Unknown CONV Param::Format type");
    }
    switch (param.sparse) {
        case Param::Sparse::DENSE:
            mgb_assert(filter_shape.ndim == 4 || filter_shape.ndim == 6,
                       "DENSE conv filter shape dimension should be "
                       "4/6(winograd mk4)");
            break;
        case Param::Sparse::GROUP:
            mgb_assert(filter_shape.ndim == 5 || filter_shape.ndim == 7,
                       "GROUP conv filter shape dimension should be "
                       "5/7(winograd mk4)");
            spatial_start++;
            group = filter_shape[0];
            break;
        default:
            mgb_assert(false, "Unknown CONV Param::Sparse type");
    }
    uint64_t fh = static_cast<uint64_t>(filter_shape[spatial_start]);
    uint64_t fw = static_cast<uint64_t>(filter_shape[spatial_start + 1]);
    // mul and add are counted as 2 operations
    return dst_shape.total_nr_elems() * fh * fw *
           static_cast<uint64_t>(src_shape[cpos]) / group * 2;
}
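// Worked example (assumed shapes, for illustration only): a dense NCHW conv
// with src (1, 64, 56, 56), filter (128, 64, 3, 3) and dst (1, 128, 56, 56)
// yields 1 * 128 * 56 * 56 * 3 * 3 * 64 * 2 = 462422016 operations, i.e.
// dst elements * FH * FW * input channels per group * 2 (mul + add).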
// ConvolutionForward
template <>
uint64_t opr_footprint_func<opr::ConvolutionForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "ConvolutionFwd opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    return eval_conv_computation<opr::ConvolutionForward>(
            src_shape, filter_shape, out_shape, opr);
}

template <>
uint64_t opr_footprint_func<opr::ConvBiasForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3 ||
                       opr->input().size() == 4,
               "ConvBiasForward opr should have two/three/four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    uint64_t res = eval_conv_computation<opr::ConvBiasForward>(
            src_shape, filter_shape, out_shape, opr);
    if (opr->input().size() == 3) {
        res += out_shape.total_nr_elems();
    }
    return res;
}

// ConvolutionBackwardData
template <>
uint64_t opr_footprint_func<opr::ConvolutionBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3,
               "ConvolutionBackwardData opr should have two or three inputs");
    auto&& filter_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardData>(
            grad_shape, filter_shape, diff_shape, opr);
}

// ConvolutionBackwardFilter
template <>
uint64_t opr_footprint_func<opr::ConvolutionBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "ConvolutionBackwardFilter opr should have three inputs");
    auto&& filter_shape = opr->input()[2]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    return eval_conv_computation<opr::ConvolutionBackwardFilter>(
            src_shape, filter_shape, diff_shape, opr);
}

// MatrixMul
template <>
uint64_t opr_footprint_func<opr::MatrixMul>(cg::OperatorNodeBase* opr) {
    auto&& mopr = opr->cast_final_safe<opr::MatrixMul>();
    auto &&i0 = opr->input(0)->shape(), &&i1 = opr->input(1)->shape();
    mgb_assert(i0.ndim == 2 && i1.ndim == 2);
    auto m = i0[0], k0 = i0[1], k1 = i1[0], n = i1[1];
    if (mopr.param().transposeA) {
        std::swap(m, k0);
    }
    if (mopr.param().transposeB) {
        std::swap(k1, n);
    }
    mgb_assert(k0 == k1);
    // mul and add are counted as 2 operations
    return m * k0 * n * 2;
}
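// Worked example (assumed shapes, for illustration only): multiplying a
// (64, 128) matrix by a (128, 256) matrix counts
// 64 * 128 * 256 * 2 = 4194304 operations, one mul and one add per
// multiply-accumulate.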
template <>
uint64_t opr_footprint_func<opr::LocalShareForward>(cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2,
               "LocalShare opr should have two inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareForward>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = filter_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return out_shape.total_nr_elems() * fh * fw * src_shape[1] * 2 / groups;
}

template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "LocalShareBackwardData opr should have three inputs");
    auto&& filter_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param = opr->cast_final_safe<opr::LocalShareBackwardData>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = filter_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return diff_shape.total_nr_elems() * fh * fw * grad_shape[1] * 2 / groups;
}

template <>
uint64_t opr_footprint_func<opr::LocalShareBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 3,
               "LocalShareBackwardFilter opr should have three inputs");
    auto&& src_shape = opr->input()[0]->shape();
    auto&& diff_shape = opr->input()[1]->shape();
    auto&& grad_shape = opr->output()[0]->shape();
    using Param = opr::LocalShareForward::Param;
    auto&& param =
            opr->cast_final_safe<opr::LocalShareBackwardFilter>().param();
    mgb_assert(param.format == Param::Format::NCHW);
    size_t groups = 1;
    size_t kern_spatial_pos = 3;
    if (param.sparse == Param::Sparse::GROUP) {
        groups = grad_shape[0];
        kern_spatial_pos = 4;
    }
    size_t fh = grad_shape[kern_spatial_pos],
           fw = grad_shape[kern_spatial_pos + 1];
    return diff_shape.total_nr_elems() * fh * fw * src_shape[1] * 2 / groups;
}

template <>
uint64_t opr_footprint_func<opr::DeformableConvForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 4,
               "DeformableConvForward opr should have four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
    auto&& param = opr->cast_final_safe<opr::DeformableConvForward>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! conv(1 mul), mask(1 mul), accumulate(1 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}

template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardFilter>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 5,
               "DeformableConvBackwardFilter opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvBackwardFilter::Param;
    auto&& param =
            opr->cast_final_safe<opr::DeformableConvBackwardFilter>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), bilinear(4 add, 4 mul,
    //! skip)
    return out_shape.total_nr_elems() * fh * fw * icpg * 3;
}

template <>
uint64_t opr_footprint_func<opr::DeformableConvBackwardData>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 5,
               "DeformableConvBackwardData opr should have five inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::DeformableConvForward::Param;
    auto&& param =
            opr->cast_final_safe<opr::DeformableConvBackwardData>().param();
    size_t fh, fw, icpg;
    mgb_assert(param.format == Param::Format::NCHW);
    if (param.sparse == Param::Sparse::GROUP) {
        icpg = filter_shape[2];
        fh = filter_shape[3], fw = filter_shape[4];
    } else {
        icpg = filter_shape[1];
        fh = filter_shape[2], fw = filter_shape[3];
    }
    //! deconv(1 mul), mask(1 mul), accumulate(1 add), grad_weight(1 mul, skip),
    //! grad_coord(4 mul, 4 add)
    return out_shape.total_nr_elems() * fh * fw * icpg * 12;
}

template <>
uint64_t opr_footprint_func<opr::BatchConvBiasForward>(
        cg::OperatorNodeBase* opr) {
    mgb_assert(opr->input().size() == 2 || opr->input().size() == 3 ||
                       opr->input().size() == 4,
               "BatchConvBias opr should have two/three/four inputs");
    auto&& out_shape = opr->output()[0]->shape();
    auto&& src_shape = opr->input()[0]->shape();
    auto&& filter_shape = opr->input()[1]->shape();
    using Param = opr::BatchConvBiasForward::Param;
    auto&& param = opr->cast_final_safe<opr::BatchConvBiasForward>().param();
    size_t packed_channels = 1;
    size_t kern_spatial_pos = 3;
    if (param.format == Param::Format::NCHW4) {
        packed_channels = 4;
    }
    size_t fh = filter_shape[kern_spatial_pos],
           fw = filter_shape[kern_spatial_pos + 1];
    return out_shape.total_nr_elems() * fh * fw * src_shape[1] *
           packed_channels * 2;
}

// Pooling
template <>
uint64_t opr_footprint_func<opr::PoolingForward>(cg::OperatorNodeBase* opr) {
    auto&& param = opr->cast_final_safe<opr::PoolingForward>().param();
    auto area = param.window_h * param.window_w;
    return opr->output(0)->shape().total_nr_elems() * area;
}

// Concat
template <>
uint64_t opr_footprint_func<opr::Concat>(cg::OperatorNodeBase* opr) {
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems();
}

// Dimshuffle
template <>
uint64_t opr_footprint_func<opr::Dimshuffle>(cg::OperatorNodeBase* opr) {
    auto&& out = opr->output()[0];
    return out->shape().total_nr_elems();
}

// Reduce
template <>
uint64_t opr_footprint_func<opr::Reduce>(cg::OperatorNodeBase* opr) {
    return opr->input()[0]->shape().total_nr_elems();
}

// Host2DeviceCopy
template <>
uint64_t opr_footprint_func<opr::Host2DeviceCopy>(cg::OperatorNodeBase* opr) {
    auto&& out_shape = opr->output()[0]->shape();
    return out_shape.total_nr_elems();
}

/******************* Register Param Json Functions *************************/
#if MGB_ENABLE_JSON
template <class T>
std::shared_ptr<json::Value> opr_param_json_func(cg::OperatorNodeBase* opr);

#define REGISTE_PARAM_JSON_FUNC(cls)                            \
    template <>                                                 \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>( \
            cg::OperatorNodeBase * opr) {                       \
        return opr::opr_param_to_json(                          \
                opr->cast_final_safe<opr::cls>().param());      \
    }
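// e.g. REGISTE_PARAM_JSON_FUNC(Elemwise) defines
// opr_param_json_func<opr::Elemwise>, which fetches the Elemwise param and
// serializes it via opr::opr_param_to_json().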
REGISTE_PARAM_JSON_FUNC(Elemwise)
REGISTE_PARAM_JSON_FUNC(ConvolutionForward)
REGISTE_PARAM_JSON_FUNC(Convolution3D)
REGISTE_PARAM_JSON_FUNC(ConvBiasForward)
REGISTE_PARAM_JSON_FUNC(ConvolutionBackwardData)
REGISTE_PARAM_JSON_FUNC(Convolution3DBackwardData)
REGISTE_PARAM_JSON_FUNC(ConvolutionBackwardFilter)
REGISTE_PARAM_JSON_FUNC(MatrixMul)
REGISTE_PARAM_JSON_FUNC(BatchedMatrixMul)
REGISTE_PARAM_JSON_FUNC(Dot)
REGISTE_PARAM_JSON_FUNC(MatrixInverse)
REGISTE_PARAM_JSON_FUNC(PoolingForward)
REGISTE_PARAM_JSON_FUNC(SVD)
REGISTE_PARAM_JSON_FUNC(MaskConvolution)
REGISTE_PARAM_JSON_FUNC(Images2Neibs)
REGISTE_PARAM_JSON_FUNC(Local)
REGISTE_PARAM_JSON_FUNC(GroupLocal)
REGISTE_PARAM_JSON_FUNC(LRN)
REGISTE_PARAM_JSON_FUNC(Concat)
REGISTE_PARAM_JSON_FUNC(Reduce)
REGISTE_PARAM_JSON_FUNC(LocalShareForward)
REGISTE_PARAM_JSON_FUNC(LocalShareBackwardData)
REGISTE_PARAM_JSON_FUNC(LocalShareBackwardFilter)
REGISTE_PARAM_JSON_FUNC(DeformableConvForward)
REGISTE_PARAM_JSON_FUNC(DeformableConvBackwardFilter)
REGISTE_PARAM_JSON_FUNC(DeformableConvBackwardData)
REGISTE_PARAM_JSON_FUNC(DeformablePSROIPoolingForward)
REGISTE_PARAM_JSON_FUNC(BatchConvBiasForward)
REGISTE_PARAM_JSON_FUNC(BatchNormForward)
REGISTE_PARAM_JSON_FUNC(ElemwiseMultiType)
REGISTE_PARAM_JSON_FUNC(Argsort)
REGISTE_PARAM_JSON_FUNC(Argmax)
REGISTE_PARAM_JSON_FUNC(Argmin)
REGISTE_PARAM_JSON_FUNC(AdaptivePooling)
REGISTE_PARAM_JSON_FUNC(ROIPooling)
REGISTE_PARAM_JSON_FUNC(ROIAlign)
REGISTE_PARAM_JSON_FUNC(WarpPerspective)
REGISTE_PARAM_JSON_FUNC(WarpAffine)
REGISTE_PARAM_JSON_FUNC(Remap)
REGISTE_PARAM_JSON_FUNC(Resize)
REGISTE_PARAM_JSON_FUNC(IndexingOneHot)
REGISTE_PARAM_JSON_FUNC(IndexingSetOneHot)
REGISTE_PARAM_JSON_FUNC(TopK)
REGISTE_PARAM_JSON_FUNC(UniformRNG)
REGISTE_PARAM_JSON_FUNC(GaussianRNG)
REGISTE_PARAM_JSON_FUNC(Linspace)
REGISTE_PARAM_JSON_FUNC(Eye)
REGISTE_PARAM_JSON_FUNC(CvtColor)

template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Dimshuffle>(
        cg::OperatorNodeBase * opr) {
    auto param = opr->cast_final_safe<opr::Dimshuffle>().param();

    auto pattern = json::Array::make();
    for (size_t i = 0; i < param.pattern_len; i++)
        pattern->add(json::NumberInt::make(param.pattern[i]));

    return json::Object::make({
            {"ndim", json::NumberInt::make(param.ndim)},
            {"pattern", pattern},
    });
}

template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::AxisAddRemove>(
        cg::OperatorNodeBase * opr) {
    auto param = opr->cast_final_safe<opr::AxisAddRemove>().param();

    auto desc = json::Array::make();
    for (size_t i = 0; i < param.nr_desc; i++) {
        auto axisdesc = param.desc[i];
        desc->add(json::Object::make({
                {"method", json::NumberInt::make(
                                   static_cast<int32_t>(axisdesc.method))},
                {"axisnum", json::NumberInt::make(axisdesc.axis.get_raw())},
        }));
    }

    return json::Object::make({
            {"nr_desc", json::NumberInt::make(param.nr_desc)},
            {"desc", desc},
    });
}

std::shared_ptr<json::Value> indexing_param_to_json(
        const std::vector<opr::indexing::AxisIndexer>& indices) {
    auto desc = json::Array::make();
    for (auto& index : indices) {
        desc->add(json::Object::make({
                {"axis", json::NumberInt::make(index.axis.get_raw())},
                {"begin", json::NumberInt::make(index.begin.node() != nullptr)},
                {"end", json::NumberInt::make(index.end.node() != nullptr)},
                {"step", json::NumberInt::make(index.step.node() != nullptr)},
                {"idx", json::NumberInt::make(index.idx.node() != nullptr)},
        }));
    }
    return desc;
}

#define REGISTE_INDEXING_PARAM_JSON_FUNC(cls)                         \
    template <>                                                       \
    std::shared_ptr<json::Value> opr_param_json_func<opr::cls>(       \
            cg::OperatorNodeBase * opr) {                             \
        auto indices = opr->cast_final_safe<opr::cls>().index_desc(); \
        return indexing_param_to_json(indices);                      \
    }

REGISTE_INDEXING_PARAM_JSON_FUNC(Subtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(SetSubtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(IncrSubtensor);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingSetMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(IndexingIncrMultiAxisVec);
REGISTE_INDEXING_PARAM_JSON_FUNC(MeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(IncrMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(SetMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedIncrMeshIndexing);
REGISTE_INDEXING_PARAM_JSON_FUNC(BatchedSetMeshIndexing);

template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::Reshape>(
        cg::OperatorNodeBase * opr) {
    auto axis_param = opr->cast_final_safe<opr::Reshape>().param();
    if (axis_param.axis != axis_param.MAX_NDIM) {
        return json::Object::make({
                {"axis", json::NumberInt::make(axis_param.axis)},
        });
    } else {
        return json::Object::make();
    }
}

template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::GetVarShape>(
        cg::OperatorNodeBase * opr) {
    auto axis_param = opr->cast_final_safe<opr::GetVarShape>().param();
    if (axis_param.axis != axis_param.MAX_NDIM) {
        return json::Object::make({
                {"axis", json::NumberInt::make(axis_param.axis)},
        });
    } else {
        return json::Object::make();
    }
}

template <>
std::shared_ptr<json::Value> opr_param_json_func<opr::standalone::NMSKeep>(
        cg::OperatorNodeBase * opr) {
    auto nms_param = opr->cast_final_safe<opr::standalone::NMSKeep>().param();
    return json::Object::make({
            {"iou_thresh", json::Number::make(nms_param.iou_thresh)},
            {"max_output", json::Number::make(nms_param.max_output)},
    });
}
#endif  // MGB_ENABLE_JSON

}  // namespace

template <class OprType>
void OprFootprint::add_single_comp_footprint() {
    MIDOUT_B(OprType,
             midout_iv(MGB_HASH_STR("OprFootprint::add_single_comp_footprint")))
    auto&& record = m_type2comp_footprint.emplace(OprType::typeinfo(),
                                                  opr_footprint_func<OprType>);
    mgb_assert(record.second, "duplicate opr typeinfo");
    MIDOUT_E
}

#if MGB_ENABLE_JSON
template <class OprType>
void OprFootprint::add_single_param_json() {
    auto&& record = m_type2param_json.emplace(OprType::typeinfo(),
                                              opr_param_json_func<OprType>);
    mgb_assert(record.second, "duplicate opr typeinfo");
}
#endif

void OprFootprint::init_all_footprints() {
    add_single_comp_footprint<opr::Elemwise>();
    add_single_comp_footprint<opr::AddUpdate>();
    add_single_comp_footprint<opr::ConvolutionForward>();
    add_single_comp_footprint<opr::ConvBiasForward>();
    add_single_comp_footprint<opr::ConvolutionBackwardData>();
    add_single_comp_footprint<opr::ConvolutionBackwardFilter>();
    add_single_comp_footprint<opr::MatrixMul>();
    add_single_comp_footprint<opr::PoolingForward>();
    add_single_comp_footprint<opr::Concat>();
    add_single_comp_footprint<opr::Dimshuffle>();
    add_single_comp_footprint<opr::Reduce>();
    add_single_comp_footprint<opr::Host2DeviceCopy>();
    add_single_comp_footprint<opr::LocalShareForward>();
    add_single_comp_footprint<opr::LocalShareBackwardData>();
    add_single_comp_footprint<opr::LocalShareBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvForward>();
    add_single_comp_footprint<opr::DeformableConvBackwardFilter>();
    add_single_comp_footprint<opr::DeformableConvBackwardData>();
    add_single_comp_footprint<opr::BatchConvBiasForward>();
#if MGB_ENABLE_JSON
    add_single_param_json<opr::Elemwise>();
    add_single_param_json<opr::ConvolutionForward>();
    add_single_param_json<opr::Convolution3D>();
    add_single_param_json<opr::ConvBiasForward>();
    add_single_param_json<opr::ConvolutionBackwardData>();
    add_single_param_json<opr::Convolution3DBackwardData>();
    add_single_param_json<opr::ConvolutionBackwardFilter>();
    add_single_param_json<opr::MatrixMul>();
    add_single_param_json<opr::BatchedMatrixMul>();
    add_single_param_json<opr::Dot>();
    add_single_param_json<opr::MatrixInverse>();
    add_single_param_json<opr::PoolingForward>();
    add_single_param_json<opr::SVD>();
    add_single_param_json<opr::MaskConvolution>();
    add_single_param_json<opr::Images2Neibs>();
    add_single_param_json<opr::Local>();
    add_single_param_json<opr::GroupLocal>();
    add_single_param_json<opr::LRN>();
    add_single_param_json<opr::Concat>();
    add_single_param_json<opr::Dimshuffle>();
    add_single_param_json<opr::AxisAddRemove>();
    add_single_param_json<opr::Subtensor>();
    add_single_param_json<opr::SetSubtensor>();
    add_single_param_json<opr::IncrSubtensor>();
    add_single_param_json<opr::IndexingMultiAxisVec>();
    add_single_param_json<opr::IndexingSetMultiAxisVec>();
    add_single_param_json<opr::IndexingIncrMultiAxisVec>();
    add_single_param_json<opr::MeshIndexing>();
    add_single_param_json<opr::SetMeshIndexing>();
    add_single_param_json<opr::IncrMeshIndexing>();
    add_single_param_json<opr::BatchedMeshIndexing>();
    add_single_param_json<opr::BatchedSetMeshIndexing>();
    add_single_param_json<opr::BatchedIncrMeshIndexing>();
    add_single_param_json<opr::Reduce>();
    add_single_param_json<opr::LocalShareForward>();
    add_single_param_json<opr::LocalShareBackwardData>();
    add_single_param_json<opr::LocalShareBackwardFilter>();
    add_single_param_json<opr::DeformableConvForward>();
    add_single_param_json<opr::DeformableConvBackwardFilter>();
    add_single_param_json<opr::DeformableConvBackwardData>();
    add_single_param_json<opr::DeformablePSROIPoolingForward>();
    add_single_param_json<opr::BatchConvBiasForward>();
    add_single_param_json<opr::BatchNormForward>();
    add_single_param_json<opr::Reshape>();
    add_single_param_json<opr::GetVarShape>();
    add_single_param_json<opr::Argsort>();
    add_single_param_json<opr::Argmin>();
    add_single_param_json<opr::Argmax>();
    add_single_param_json<opr::ElemwiseMultiType>();
    add_single_param_json<opr::AdaptivePooling>();
    add_single_param_json<opr::ROIPooling>();
    add_single_param_json<opr::ROIAlign>();
    add_single_param_json<opr::WarpPerspective>();
    add_single_param_json<opr::Remap>();
    add_single_param_json<opr::Resize>();
    add_single_param_json<opr::IndexingOneHot>();
    add_single_param_json<opr::IndexingSetOneHot>();
    add_single_param_json<opr::WarpAffine>();
    add_single_param_json<opr::TopK>();
    add_single_param_json<opr::UniformRNG>();
    add_single_param_json<opr::GaussianRNG>();
    add_single_param_json<opr::Linspace>();
    add_single_param_json<opr::Eye>();
    add_single_param_json<opr::standalone::NMSKeep>();
    add_single_param_json<opr::CvtColor>();
#endif
}
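// Footprint accounting used below: `memory` sums the bytes of every input
// that is a device-value dependency plus every non-VOLATILE output;
// `computation` is looked up from the per-type functions registered above
// (zero for unregistered opr types).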
OprFootprint::Result OprFootprint::calc_footprint(cg::OperatorNodeBase* opr) {
    Result rst;
    auto&& dep_map = opr->node_prop().dep_map();
    for (auto&& inp : opr->input()) {
        if (inp->mem_plan().valid())
            rst.inp_layout.push_back(inp->layout());
        else
            rst.inp_layout.push_back({inp->shape(), inp->dtype()});
        if (cg::OperatorNodeBase::NodeProp::is_device_value_dep(
                    dep_map.at(inp))) {
            rst.memory += inp->dtype().size(inp->shape().total_nr_elems());
        }
    }
    for (auto&& out : opr->output()) {
        if (out->contain_flag(VarNode::Flag::VOLATILE_CONTENT))
            continue;
        rst.out_shape.push_back(out->shape());
        rst.memory += out->dtype().size(out->shape().total_nr_elems());
    }
    rst.computation = get_computation(opr);
#if MGB_ENABLE_JSON
    rst.param = get_param_json(opr);
#endif
    rst.opr_type = opr->dyn_typeinfo();
    return rst;
}

uint64_t OprFootprint::get_computation(cg::OperatorNodeBase* opr) {
    auto comp_trait = m_type2comp_footprint.find(opr->dyn_typeinfo());
    if (comp_trait != m_type2comp_footprint.end()) {
        return (comp_trait->second)(opr);
    }
    return 0;
}

#if MGB_ENABLE_JSON
std::shared_ptr<json::Value> OprFootprint::get_param_json(
        cg::OperatorNodeBase* opr) {
    auto param_trait = m_type2param_json.find(opr->dyn_typeinfo());
    if (param_trait != m_type2param_json.end()) {
        return (param_trait->second)(opr);
    }
    return json::Object::make();
}

std::shared_ptr<json::Value> OprFootprint::Result::to_json() const {
    using namespace json;
    std::shared_ptr<Value> comp;
    if (computation) {
        comp = NumberInt::make(computation);
    } else {
        comp = Null::make();
    }
    auto format_shape_arr = [](const TensorShapeArray& arr) {
        auto ret = Array::make();
        for (auto&& shp : arr) {
            auto cur = Array::make();
            for (size_t i = 0; i < shp.ndim; ++i) {
                cur->add(NumberInt::make(shp[i]));
            }
            ret->add(std::move(cur));
        }
        return ret;
    };
    auto format_layout_arr =
            [](const TensorLayoutArray& arr) -> std::shared_ptr<Value> {
        auto ret = Array::make();
        bool have_non_contig = false;
        for (auto&& item : arr) {
            if (item.is_contiguous()) {
                ret->add(json::Null::make());
            } else {
                have_non_contig = true;
                auto cur = Array::make();
                for (size_t i = 0; i < item.ndim; ++i) {
                    cur->add(NumberInt::make(item.stride[i]));
                }
                ret->add(std::move(cur));
            }
        }
        if (!have_non_contig) {
            ret.reset();
        }
        return ret;
    };
    TensorShapeArray inp_shape;
    for (auto&& i : inp_layout)
        inp_shape.push_back(i);
    auto ret = Object::make({{"computation", std::move(comp)},
                             {"memory", NumberInt::make(memory)},
                             {"in_shapes", format_shape_arr(inp_shape)},
                             {"out_shapes", format_shape_arr(out_shape)},
                             {"param", param}});
    if (auto inp_layout_json = format_layout_arr(inp_layout)) {
        ret->operator[]("in_layouts") = std::move(inp_layout_json);
    }
    return ret;
}

std::shared_ptr<json::Value> OprFootprint::get_opr_fp_graph_exec(
        cg::ComputingGraph& graph, const SymbolVarArray& outputs) {
    OprFootprint m_opr_footprint;
    ComputingGraph::OutputSpec out_spec;
    for (auto i : outputs) {
        out_spec.emplace_back(i, nullptr);
    }
    graph.options().allocate_static_mem_after_graph_compile = true;
    auto async_exec = graph.compile(out_spec);
    std::vector<std::pair<json::String, std::shared_ptr<json::Value>>> rst_vals;
    auto on_opr = [&m_opr_footprint, &rst_vals](cg::OperatorNodeBase* opr) {
        Result trait(m_opr_footprint.calc_footprint(opr));
        rst_vals.emplace_back(json::String(opr->id_str()), trait.to_json());
        return true;
    };
    async_exec->iter_opr_seq(on_opr);
    auto opr_fp = json::Object::make(rst_vals);
    return json::Object::make(
            {{"opr_footprint", opr_fp}, {"graph_exec", async_exec->to_json()}});
}
#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
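For reference, a minimal sketch of how the footprint JSON above could be obtained for a small graph (assuming `MGB_ENABLE_JSON` and the usual megbrain graph-building API; the graph construction here is illustrative and not part of opr_footprint.cpp):

#include "megbrain/graph.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/io.h"
#include "megbrain/plugin/opr_footprint.h"

using namespace mgb;

std::shared_ptr<json::Value> footprint_demo() {
    auto graph = cg::ComputingGraph::make();
    // a single host input of shape (4, 8)
    auto host_x = std::make_shared<HostTensorND>(
            CompNode::load("xpu0"), TensorShape{4, 8}, dtype::Float32());
    auto x = opr::Host2DeviceCopy::make(*graph, host_x);
    auto y = x + x;  // Elemwise ADD: computation = 4 * 8 = 32
    // compiles the graph internally and reports per-opr footprints as JSON
    return OprFootprint::get_opr_fp_graph_exec(*graph, {y});
}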

The MegEngine package bundles the CUDA environment needed to run code on GPU, so there is no separate CPU/GPU build to choose. To run GPU programs, make sure the machine has GPU hardware and a properly installed driver. To try deep-learning development on cloud GPU compute, visit the MegStudio platform.