You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

opdef.h.inl 88 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798
  1. // clang-format off
  2. class AdaptivePooling : public OpDefImplBase<AdaptivePooling> {
  3. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  4. public:
  5. using Mode = ::megdnn::param::AdaptivePooling::Mode;
  6. using Format = ::megdnn::param::AdaptivePooling::Format;
  7. Mode mode = ::megdnn::param::AdaptivePooling::Mode::MAX;
  8. Format format = ::megdnn::param::AdaptivePooling::Format::NCHW;
  9. std::vector<int32_t> shape;
  10. AdaptivePooling() = default;
  11. AdaptivePooling(Mode mode_, Format format_, std::vector<int32_t> shape_, std::string scope_ = {}): mode(mode_), format(format_), shape(shape_) { set_scope(scope_); }
  12. AdaptivePooling(::megdnn::param::AdaptivePooling packed_param_0, std::vector<int32_t> shape_): mode(packed_param_0.mode), format(packed_param_0.format), shape(shape_) {}
  13. ::megdnn::param::AdaptivePooling param() const {
  14. return {mode, format};
  15. }
  16. };
  17. class AddAxis : public OpDefImplBase<AddAxis> {
  18. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  19. public:
  20. std::vector<int32_t> axis;
  21. AddAxis() = default;
  22. AddAxis(std::vector<int32_t> axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
  23. };
  24. class Argmax : public OpDefImplBase<Argmax> {
  25. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  26. public:
  27. int32_t axis = 0;
  28. Argmax() = default;
  29. Argmax(int32_t axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
  30. Argmax(::megdnn::param::Axis packed_param_0): axis(packed_param_0.axis) {}
  31. ::megdnn::param::Axis param() const {
  32. return {axis};
  33. }
  34. };
  35. class Argmin : public OpDefImplBase<Argmin> {
  36. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  37. public:
  38. int32_t axis = 0;
  39. Argmin() = default;
  40. Argmin(int32_t axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
  41. Argmin(::megdnn::param::Axis packed_param_0): axis(packed_param_0.axis) {}
  42. ::megdnn::param::Axis param() const {
  43. return {axis};
  44. }
  45. };
  46. class Argsort : public OpDefImplBase<Argsort> {
  47. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  48. public:
  49. using Order = ::megdnn::param::Argsort::Order;
  50. Order order = ::megdnn::param::Argsort::Order::ASCENDING;
  51. Argsort() = default;
  52. Argsort(Order order_, std::string scope_ = {}): order(order_) { set_scope(scope_); }
  53. Argsort(::megdnn::param::Argsort packed_param_0): order(packed_param_0.order) {}
  54. ::megdnn::param::Argsort param() const {
  55. return {order};
  56. }
  57. };
  58. class AssertEqual : public OpDefImplBase<AssertEqual> {
  59. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  60. public:
  61. float maxerr = 0.0001;
  62. bool verbose = false;
  63. AssertEqual() = default;
  64. AssertEqual(float maxerr_, bool verbose_, std::string scope_ = {}): maxerr(maxerr_), verbose(verbose_) { set_scope(scope_); }
  65. AssertEqual(::megdnn::param::AssertEqual packed_param_0): maxerr(packed_param_0.maxerr), verbose(packed_param_0.verbose) {}
  66. ::megdnn::param::AssertEqual param() const {
  67. return {maxerr, verbose};
  68. }
  69. };
  70. class AtlasRuntime : public OpDefImplBase<AtlasRuntime> {
  71. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  72. public:
  73. std::string buf;
  74. size_t buf_size;
  75. AtlasRuntime() = default;
  76. AtlasRuntime(std::string buf_, size_t buf_size_, std::string scope_ = {}): buf(buf_), buf_size(buf_size_) { set_scope(scope_); }
  77. };
  78. class Barrier : public OpDefImplBase<Barrier> {
  79. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  80. public:
  81. ::mgb::CompNode comp_node;
  82. uint32_t nr_outputs;
  83. Barrier() = default;
  84. Barrier(::mgb::CompNode comp_node_, uint32_t nr_outputs_, std::string scope_ = {}): comp_node(comp_node_), nr_outputs(nr_outputs_) { set_scope(scope_); }
  85. };
  86. class BatchConvBias : public OpDefImplBase<BatchConvBias> {
  87. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  88. public:
  89. using NonlineMode = ::megdnn::param::BatchConvBias::NonlineMode;
  90. using Mode = ::megdnn::param::BatchConvBias::Mode;
  91. using Sparse = ::megdnn::param::BatchConvBias::Sparse;
  92. using Format = ::megdnn::param::BatchConvBias::Format;
  93. using ComputeMode = ::megdnn::param::BatchConvBias::ComputeMode;
  94. using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
  95. NonlineMode nonlineMode = ::megdnn::param::BatchConvBias::NonlineMode::IDENTITY;
  96. Mode mode = ::megdnn::param::BatchConvBias::Mode::CROSS_CORRELATION;
  97. uint32_t pad_h = 0;
  98. uint32_t pad_w = 0;
  99. uint32_t stride_h = 1;
  100. uint32_t stride_w = 1;
  101. uint32_t dilate_h = 1;
  102. uint32_t dilate_w = 1;
  103. Sparse sparse = ::megdnn::param::BatchConvBias::Sparse::DENSE;
  104. Format format = ::megdnn::param::BatchConvBias::Format::NCHW;
  105. ComputeMode compute_mode = ::megdnn::param::BatchConvBias::ComputeMode::DEFAULT;
  106. Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
  107. uint64_t workspace_limit = 18446744073709551615ull;
  108. ::megdnn::DType dtype;
  109. BatchConvBias() = default;
  110. BatchConvBias(NonlineMode nonlineMode_, Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, Format format_, ComputeMode compute_mode_, Strategy strategy_, uint64_t workspace_limit_, ::megdnn::DType dtype_, std::string scope_ = {}): nonlineMode(nonlineMode_), mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), format(format_), compute_mode(compute_mode_), strategy(strategy_), workspace_limit(workspace_limit_), dtype(dtype_) {
  111. set_scope(scope_);
  112. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  113. }
  114. BatchConvBias(::megdnn::param::BatchConvBias packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1, ::megdnn::DType dtype_): nonlineMode(packed_param_0.nonlineMode), mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), format(packed_param_0.format), compute_mode(packed_param_0.compute_mode), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit), dtype(dtype_) {
  115. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  116. }
  117. ::megdnn::param::BatchConvBias param() const {
  118. return {nonlineMode, mode, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, sparse, format, compute_mode};
  119. }
  120. ::megdnn::param::ExecutionPolicy policy() const {
  121. return {strategy, workspace_limit};
  122. }
  123. };
  124. class BatchNorm : public OpDefImplBase<BatchNorm> {
  125. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  126. public:
  127. using ParamDim = ::megdnn::param::BN::ParamDim;
  128. using FwdMode = ::megdnn::param::BN::FwdMode;
  129. ParamDim param_dim = ::megdnn::param::BN::ParamDim::DIM_11HW;
  130. FwdMode fwd_mode = ::megdnn::param::BN::FwdMode::TRAINING;
  131. double epsilon = 1e-4f;
  132. double avg_factor = 1.f;
  133. float scale = 1.f;
  134. float bias = 0.f;
  135. BatchNorm() = default;
  136. BatchNorm(ParamDim param_dim_, FwdMode fwd_mode_, double epsilon_, double avg_factor_, float scale_, float bias_, std::string scope_ = {}): param_dim(param_dim_), fwd_mode(fwd_mode_), epsilon(epsilon_), avg_factor(avg_factor_), scale(scale_), bias(bias_) { set_scope(scope_); }
  137. BatchNorm(::megdnn::param::BN packed_param_0): param_dim(packed_param_0.param_dim), fwd_mode(packed_param_0.fwd_mode), epsilon(packed_param_0.epsilon), avg_factor(packed_param_0.avg_factor), scale(packed_param_0.scale), bias(packed_param_0.bias) {}
  138. ::megdnn::param::BN param() const {
  139. return {param_dim, fwd_mode, epsilon, avg_factor, scale, bias};
  140. }
  141. };
  142. class BatchNormBackward : public OpDefImplBase<BatchNormBackward> {
  143. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  144. public:
  145. using ParamDim = ::megdnn::param::BN::ParamDim;
  146. using FwdMode = ::megdnn::param::BN::FwdMode;
  147. ParamDim param_dim = ::megdnn::param::BN::ParamDim::DIM_11HW;
  148. FwdMode fwd_mode = ::megdnn::param::BN::FwdMode::TRAINING;
  149. double epsilon = 1e-4f;
  150. double avg_factor = 1.f;
  151. float scale = 1.f;
  152. float bias = 0.f;
  153. BatchNormBackward() = default;
  154. BatchNormBackward(ParamDim param_dim_, FwdMode fwd_mode_, double epsilon_, double avg_factor_, float scale_, float bias_, std::string scope_ = {}): param_dim(param_dim_), fwd_mode(fwd_mode_), epsilon(epsilon_), avg_factor(avg_factor_), scale(scale_), bias(bias_) { set_scope(scope_); }
  155. BatchNormBackward(::megdnn::param::BN packed_param_0): param_dim(packed_param_0.param_dim), fwd_mode(packed_param_0.fwd_mode), epsilon(packed_param_0.epsilon), avg_factor(packed_param_0.avg_factor), scale(packed_param_0.scale), bias(packed_param_0.bias) {}
  156. ::megdnn::param::BN param() const {
  157. return {param_dim, fwd_mode, epsilon, avg_factor, scale, bias};
  158. }
  159. };
  160. class BatchedIncrMeshIndexing : public OpDefImplBase<BatchedIncrMeshIndexing> {
  161. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  162. public:
  163. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
  164. BatchedIncrMeshIndexing() = default;
  165. BatchedIncrMeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
  166. };
  167. class BatchedMatrixMul : public OpDefImplBase<BatchedMatrixMul> {
  168. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  169. public:
  170. using ComputeMode = ::megdnn::param::MatrixMul::ComputeMode;
  171. using Format = ::megdnn::param::MatrixMul::Format;
  172. using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
  173. bool transposeA = false;
  174. bool transposeB = false;
  175. ComputeMode compute_mode = ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
  176. Format format = ::megdnn::param::MatrixMul::Format::DEFAULT;
  177. Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
  178. uint64_t workspace_limit = 18446744073709551615ull;
  179. uint32_t dimA;
  180. uint32_t dimB;
  181. BatchedMatrixMul() = default;
  182. BatchedMatrixMul(bool transposeA_, bool transposeB_, ComputeMode compute_mode_, Format format_, Strategy strategy_, uint64_t workspace_limit_, uint32_t dimA_, uint32_t dimB_, std::string scope_ = {}): transposeA(transposeA_), transposeB(transposeB_), compute_mode(compute_mode_), format(format_), strategy(strategy_), workspace_limit(workspace_limit_), dimA(dimA_), dimB(dimB_) {
  183. set_scope(scope_);
  184. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  185. }
  186. BatchedMatrixMul(::megdnn::param::MatrixMul packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1, uint32_t dimA_, uint32_t dimB_): transposeA(packed_param_0.transposeA), transposeB(packed_param_0.transposeB), compute_mode(packed_param_0.compute_mode), format(packed_param_0.format), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit), dimA(dimA_), dimB(dimB_) {
  187. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  188. }
  189. ::megdnn::param::MatrixMul param() const {
  190. return {transposeA, transposeB, compute_mode, format};
  191. }
  192. ::megdnn::param::ExecutionPolicy policy() const {
  193. return {strategy, workspace_limit};
  194. }
  195. };
  196. class BatchedMeshIndexing : public OpDefImplBase<BatchedMeshIndexing> {
  197. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  198. public:
  199. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
  200. BatchedMeshIndexing() = default;
  201. BatchedMeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
  202. };
  203. class BatchedSetMeshIndexing : public OpDefImplBase<BatchedSetMeshIndexing> {
  204. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  205. public:
  206. std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
  207. BatchedSetMeshIndexing() = default;
  208. BatchedSetMeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
  209. };
  210. class BetaRNG : public OpDefImplBase<BetaRNG> {
  211. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  212. public:
  213. uint64_t seed = 0;
  214. size_t handle;
  215. BetaRNG() = default;
  216. BetaRNG(uint64_t seed_, size_t handle_, std::string scope_ = {}): seed(seed_), handle(handle_) { set_scope(scope_); }
  217. BetaRNG(::megdnn::param::BetaRNG packed_param_0, size_t handle_): seed(packed_param_0.seed), handle(handle_) {}
  218. ::megdnn::param::BetaRNG param() const {
  219. return {seed};
  220. }
  221. };
  222. class Borrow : public OpDefImplBase<Borrow> {
  223. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  224. public:
  225. ::mgb::CompNode comp_node;
  226. Borrow() = default;
  227. Borrow(::mgb::CompNode comp_node_, std::string scope_ = {}): comp_node(comp_node_) { set_scope(scope_); }
  228. };
  229. class Broadcast : public OpDefImplBase<Broadcast> {
  230. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  231. public:
  232. std::vector<int32_t> shape;
  233. Broadcast() = default;
  234. Broadcast(std::vector<int32_t> shape_, std::string scope_ = {}): shape(shape_) { set_scope(scope_); }
  235. Broadcast(::megdnn::param::Empty, std::vector<int32_t> shape_): shape(shape_) {}
  236. ::megdnn::param::Empty param() const {
  237. return {};
  238. }
  239. };
  240. class CambriconRuntime : public OpDefImplBase<CambriconRuntime> {
  241. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  242. public:
  243. std::string buf;
  244. size_t buf_size;
  245. std::string symbol;
  246. bool tensor_dim_mutable;
  247. CambriconRuntime() = default;
  248. CambriconRuntime(std::string buf_, size_t buf_size_, std::string symbol_, bool tensor_dim_mutable_, std::string scope_ = {}): buf(buf_), buf_size(buf_size_), symbol(symbol_), tensor_dim_mutable(tensor_dim_mutable_) { set_scope(scope_); }
  249. };
  250. class CheckNonFinite : public OpDefImplBase<CheckNonFinite> {
  251. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  252. public:
  253. float scale = 1.0;
  254. CheckNonFinite() = default;
  255. CheckNonFinite(float scale_, std::string scope_ = {}): scale(scale_) { set_scope(scope_); }
  256. CheckNonFinite(::megdnn::param::CheckNonFinite packed_param_0): scale(packed_param_0.scale) {}
  257. ::megdnn::param::CheckNonFinite param() const {
  258. return {scale};
  259. }
  260. };
  261. class CollectiveComm : public OpDefImplBase<CollectiveComm> {
  262. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  263. public:
  264. using Mode = ::megdnn::param::CollectiveComm::Mode;
  265. Mode mode = ::megdnn::param::CollectiveComm::Mode::REDUCE_SUM;
  266. std::string key;
  267. uint32_t nr_devices;
  268. uint32_t rank;
  269. bool is_root;
  270. bool local_grad;
  271. std::string addr;
  272. uint32_t port;
  273. ::megdnn::DType dtype;
  274. std::string backend;
  275. std::string comp_node;
  276. CollectiveComm() = default;
  277. CollectiveComm(Mode mode_, std::string key_, uint32_t nr_devices_, uint32_t rank_, bool is_root_, bool local_grad_, std::string addr_, uint32_t port_, ::megdnn::DType dtype_, std::string backend_, std::string comp_node_, std::string scope_ = {}): mode(mode_), key(key_), nr_devices(nr_devices_), rank(rank_), is_root(is_root_), local_grad(local_grad_), addr(addr_), port(port_), dtype(dtype_), backend(backend_), comp_node(comp_node_) { set_scope(scope_); }
  278. CollectiveComm(::megdnn::param::CollectiveComm packed_param_0, std::string key_, uint32_t nr_devices_, uint32_t rank_, bool is_root_, bool local_grad_, std::string addr_, uint32_t port_, ::megdnn::DType dtype_, std::string backend_, std::string comp_node_): mode(packed_param_0.mode), key(key_), nr_devices(nr_devices_), rank(rank_), is_root(is_root_), local_grad(local_grad_), addr(addr_), port(port_), dtype(dtype_), backend(backend_), comp_node(comp_node_) {}
  279. ::megdnn::param::CollectiveComm param() const {
  280. return {mode};
  281. }
  282. };
  283. class Concat : public OpDefImplBase<Concat> {
  284. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  285. public:
  286. int32_t axis = 0;
  287. ::mgb::CompNode comp_node;
  288. Concat() = default;
  289. Concat(int32_t axis_, ::mgb::CompNode comp_node_, std::string scope_ = {}): axis(axis_), comp_node(comp_node_) { set_scope(scope_); }
  290. Concat(::megdnn::param::Axis packed_param_0, ::mgb::CompNode comp_node_): axis(packed_param_0.axis), comp_node(comp_node_) {}
  291. ::megdnn::param::Axis param() const {
  292. return {axis};
  293. }
  294. };
  295. class CondTake : public OpDefImplBase<CondTake> {
  296. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  297. public:
  298. CondTake() = default;
  299. };
  300. class ConvBias : public OpDefImplBase<ConvBias> {
  301. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  302. public:
  303. using NonlineMode = ::megdnn::param::ConvBias::NonlineMode;
  304. using Mode = ::megdnn::param::ConvBias::Mode;
  305. using Sparse = ::megdnn::param::ConvBias::Sparse;
  306. using Format = ::megdnn::param::ConvBias::Format;
  307. using ComputeMode = ::megdnn::param::ConvBias::ComputeMode;
  308. using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
  309. NonlineMode nonlineMode = ::megdnn::param::ConvBias::NonlineMode::IDENTITY;
  310. Mode mode = ::megdnn::param::ConvBias::Mode::CROSS_CORRELATION;
  311. Sparse sparse = ::megdnn::param::ConvBias::Sparse::DENSE;
  312. Format format = ::megdnn::param::ConvBias::Format::NCHW;
  313. uint32_t pad_h = 0;
  314. uint32_t pad_w = 0;
  315. uint32_t stride_h = 1;
  316. uint32_t stride_w = 1;
  317. uint32_t dilate_h = 1;
  318. uint32_t dilate_w = 1;
  319. ComputeMode compute_mode = ::megdnn::param::ConvBias::ComputeMode::DEFAULT;
  320. Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
  321. uint64_t workspace_limit = 18446744073709551615ull;
  322. ::megdnn::DType dtype;
  323. ConvBias() = default;
  324. ConvBias(NonlineMode nonlineMode_, Mode mode_, Sparse sparse_, Format format_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, ComputeMode compute_mode_, Strategy strategy_, uint64_t workspace_limit_, ::megdnn::DType dtype_, std::string scope_ = {}): nonlineMode(nonlineMode_), mode(mode_), sparse(sparse_), format(format_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), compute_mode(compute_mode_), strategy(strategy_), workspace_limit(workspace_limit_), dtype(dtype_) {
  325. set_scope(scope_);
  326. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  327. }
  328. ConvBias(::megdnn::param::ConvBias packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1, ::megdnn::DType dtype_): nonlineMode(packed_param_0.nonlineMode), mode(packed_param_0.mode), sparse(packed_param_0.sparse), format(packed_param_0.format), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), compute_mode(packed_param_0.compute_mode), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit), dtype(dtype_) {
  329. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  330. }
  331. ::megdnn::param::ConvBias param() const {
  332. return {nonlineMode, mode, sparse, format, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, compute_mode};
  333. }
  334. ::megdnn::param::ExecutionPolicy policy() const {
  335. return {strategy, workspace_limit};
  336. }
  337. };
  338. class Convolution : public OpDefImplBase<Convolution> {
  339. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  340. public:
  341. using Mode = ::megdnn::param::Convolution::Mode;
  342. using Sparse = ::megdnn::param::Convolution::Sparse;
  343. using Format = ::megdnn::param::Convolution::Format;
  344. using ComputeMode = ::megdnn::param::Convolution::ComputeMode;
  345. using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
  346. Mode mode = ::megdnn::param::Convolution::Mode::CROSS_CORRELATION;
  347. uint32_t pad_h = 0;
  348. uint32_t pad_w = 0;
  349. uint32_t stride_h = 1;
  350. uint32_t stride_w = 1;
  351. uint32_t dilate_h = 1;
  352. uint32_t dilate_w = 1;
  353. Sparse sparse = ::megdnn::param::Convolution::Sparse::DENSE;
  354. Format format = ::megdnn::param::Convolution::Format::NCHW;
  355. ComputeMode compute_mode = ::megdnn::param::Convolution::ComputeMode::DEFAULT;
  356. Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
  357. uint64_t workspace_limit = 18446744073709551615ull;
  358. Convolution() = default;
  359. Convolution(Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, Format format_, ComputeMode compute_mode_, Strategy strategy_, uint64_t workspace_limit_, std::string scope_ = {}): mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), format(format_), compute_mode(compute_mode_), strategy(strategy_), workspace_limit(workspace_limit_) {
  360. set_scope(scope_);
  361. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  362. }
  363. Convolution(::megdnn::param::Convolution packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1): mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), format(packed_param_0.format), compute_mode(packed_param_0.compute_mode), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit) {
  364. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  365. }
  366. ::megdnn::param::Convolution param() const {
  367. return {mode, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, sparse, format, compute_mode};
  368. }
  369. ::megdnn::param::ExecutionPolicy policy() const {
  370. return {strategy, workspace_limit};
  371. }
  372. };
  373. class Convolution3D : public OpDefImplBase<Convolution3D> {
  374. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  375. public:
  376. using Mode = ::megdnn::param::Convolution3D::Mode;
  377. using Sparse = ::megdnn::param::Convolution3D::Sparse;
  378. using DataType = ::megdnn::param::Convolution3D::DataType;
  379. using Format = ::megdnn::param::Convolution3D::Format;
  380. using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
  381. Mode mode = ::megdnn::param::Convolution3D::Mode::CROSS_CORRELATION;
  382. uint32_t pad_d = 0;
  383. uint32_t pad_h = 0;
  384. uint32_t pad_w = 0;
  385. uint32_t stride_d = 1;
  386. uint32_t stride_h = 1;
  387. uint32_t stride_w = 1;
  388. uint32_t dilate_d = 1;
  389. uint32_t dilate_h = 1;
  390. uint32_t dilate_w = 1;
  391. Sparse sparse = ::megdnn::param::Convolution3D::Sparse::DENSE;
  392. DataType data_type = ::megdnn::param::Convolution3D::DataType::FLOAT;
  393. Format format = ::megdnn::param::Convolution3D::Format::NCDHW;
  394. Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
  395. uint64_t workspace_limit = 18446744073709551615ull;
  396. Convolution3D() = default;
  397. Convolution3D(Mode mode_, uint32_t pad_d_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_d_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_d_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, DataType data_type_, Format format_, Strategy strategy_, uint64_t workspace_limit_, std::string scope_ = {}): mode(mode_), pad_d(pad_d_), pad_h(pad_h_), pad_w(pad_w_), stride_d(stride_d_), stride_h(stride_h_), stride_w(stride_w_), dilate_d(dilate_d_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), data_type(data_type_), format(format_), strategy(strategy_), workspace_limit(workspace_limit_) {
  398. set_scope(scope_);
  399. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  400. }
  401. Convolution3D(::megdnn::param::Convolution3D packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1): mode(packed_param_0.mode), pad_d(packed_param_0.pad_d), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_d(packed_param_0.stride_d), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_d(packed_param_0.dilate_d), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), data_type(packed_param_0.data_type), format(packed_param_0.format), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit) {
  402. mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
  403. }
  404. ::megdnn::param::Convolution3D param() const {
  405. return {mode, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilate_d, dilate_h, dilate_w, sparse, data_type, format};
  406. }
  407. ::megdnn::param::ExecutionPolicy policy() const {
  408. return {strategy, workspace_limit};
  409. }
  410. };
// Generated opdef: Convolution3DBackwardData — gradient-w.r.t.-data (deconvolution)
// of a 3D convolution. Bundles megdnn::param::Convolution3D fields with the
// execution policy (algorithm-search strategy + workspace limit).
class Convolution3DBackwardData : public OpDefImplBase<Convolution3DBackwardData> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Convolution3D::Mode;
using Sparse = ::megdnn::param::Convolution3D::Sparse;
using DataType = ::megdnn::param::Convolution3D::DataType;
using Format = ::megdnn::param::Convolution3D::Format;
using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
Mode mode = ::megdnn::param::Convolution3D::Mode::CROSS_CORRELATION;
uint32_t pad_d = 0;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_d = 1;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_d = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
Sparse sparse = ::megdnn::param::Convolution3D::Sparse::DENSE;
DataType data_type = ::megdnn::param::Convolution3D::DataType::FLOAT;
Format format = ::megdnn::param::Convolution3D::Format::NCDHW;
// Default is raw flag value 1 of the Strategy bit-flag enum (generated default).
Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
// UINT64_MAX: effectively no workspace cap.
uint64_t workspace_limit = 18446744073709551615ull;
Convolution3DBackwardData() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
Convolution3DBackwardData(Mode mode_, uint32_t pad_d_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_d_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_d_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, DataType data_type_, Format format_, Strategy strategy_, uint64_t workspace_limit_, std::string scope_ = {}): mode(mode_), pad_d(pad_d_), pad_h(pad_h_), pad_w(pad_w_), stride_d(stride_d_), stride_h(stride_h_), stride_w(stride_w_), dilate_d(dilate_d_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), data_type(data_type_), format(format_), strategy(strategy_), workspace_limit(workspace_limit_) {
set_scope(scope_);
// Strategy is a bit-flag set; 8 is the generated upper bound of valid raw values.
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Construct from the packed megdnn param structs (param + execution policy).
Convolution3DBackwardData(::megdnn::param::Convolution3D packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1): mode(packed_param_0.mode), pad_d(packed_param_0.pad_d), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_d(packed_param_0.stride_d), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_d(packed_param_0.dilate_d), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), data_type(packed_param_0.data_type), format(packed_param_0.format), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit) {
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Re-pack the convolution fields into a megdnn param struct (field order matters).
::megdnn::param::Convolution3D param() const {
return {mode, pad_d, pad_h, pad_w, stride_d, stride_h, stride_w, dilate_d, dilate_h, dilate_w, sparse, data_type, format};
}
// Re-pack the execution-policy fields.
::megdnn::param::ExecutionPolicy policy() const {
return {strategy, workspace_limit};
}
};
// Generated opdef: ConvolutionBackwardData — gradient-w.r.t.-data (deconvolution)
// of a 2D convolution. Carries the megdnn Convolution param, the execution policy,
// and an extra dtype field that is NOT part of the packed param (see param()).
class ConvolutionBackwardData : public OpDefImplBase<ConvolutionBackwardData> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Convolution::Mode;
using Sparse = ::megdnn::param::Convolution::Sparse;
using Format = ::megdnn::param::Convolution::Format;
using ComputeMode = ::megdnn::param::Convolution::ComputeMode;
using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
Mode mode = ::megdnn::param::Convolution::Mode::CROSS_CORRELATION;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
Sparse sparse = ::megdnn::param::Convolution::Sparse::DENSE;
Format format = ::megdnn::param::Convolution::Format::NCHW;
ComputeMode compute_mode = ::megdnn::param::Convolution::ComputeMode::DEFAULT;
// Default is raw flag value 1 of the Strategy bit-flag enum (generated default).
Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
// UINT64_MAX: effectively no workspace cap.
uint64_t workspace_limit = 18446744073709551615ull;
// Presumably the dtype of the produced gradient; default-constructed (invalid)
// unless set — confirm against call sites.
::megdnn::DType dtype;
ConvolutionBackwardData() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
ConvolutionBackwardData(Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, Format format_, ComputeMode compute_mode_, Strategy strategy_, uint64_t workspace_limit_, ::megdnn::DType dtype_, std::string scope_ = {}): mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), format(format_), compute_mode(compute_mode_), strategy(strategy_), workspace_limit(workspace_limit_), dtype(dtype_) {
set_scope(scope_);
// Strategy is a bit-flag set; 8 is the generated upper bound of valid raw values.
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Construct from packed megdnn param structs plus the loose dtype.
ConvolutionBackwardData(::megdnn::param::Convolution packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1, ::megdnn::DType dtype_): mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), format(packed_param_0.format), compute_mode(packed_param_0.compute_mode), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit), dtype(dtype_) {
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Re-pack convolution fields (dtype intentionally excluded).
::megdnn::param::Convolution param() const {
return {mode, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, sparse, format, compute_mode};
}
// Re-pack the execution-policy fields.
::megdnn::param::ExecutionPolicy policy() const {
return {strategy, workspace_limit};
}
};
// Generated opdef: Copy — its only state is a CompNode (presumably the copy
// destination; confirm at call sites).
class Copy : public OpDefImplBase<Copy> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
::mgb::CompNode comp_node;
Copy() = default;
Copy(::mgb::CompNode comp_node_, std::string scope_ = {}): comp_node(comp_node_) { set_scope(scope_); }
};
// Generated opdef: Correlation — wraps megdnn::param::Correlation
// (kernel/displacement/stride/pad settings; is_multiply selects the
// elementwise op per the megdnn param definition).
class Correlation : public OpDefImplBase<Correlation> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Format = ::megdnn::param::Correlation::Format;
Format format = ::megdnn::param::Correlation::Format::NCHW;
uint32_t kernel_size = 1;
uint32_t max_displacement = 1;
uint32_t stride1 = 1;
uint32_t stride2 = 1;
uint32_t pad_size = 0;
bool is_multiply = true;
Correlation() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
Correlation(Format format_, uint32_t kernel_size_, uint32_t max_displacement_, uint32_t stride1_, uint32_t stride2_, uint32_t pad_size_, bool is_multiply_, std::string scope_ = {}): format(format_), kernel_size(kernel_size_), max_displacement(max_displacement_), stride1(stride1_), stride2(stride2_), pad_size(pad_size_), is_multiply(is_multiply_) { set_scope(scope_); }
// Construct from the packed megdnn param struct.
Correlation(::megdnn::param::Correlation packed_param_0): format(packed_param_0.format), kernel_size(packed_param_0.kernel_size), max_displacement(packed_param_0.max_displacement), stride1(packed_param_0.stride1), stride2(packed_param_0.stride2), pad_size(packed_param_0.pad_size), is_multiply(packed_param_0.is_multiply) {}
// Re-pack fields into the megdnn param struct (field order matters).
::megdnn::param::Correlation param() const {
return {format, kernel_size, max_displacement, stride1, stride2, pad_size, is_multiply};
}
};
// Generated opdef: Cumsum — wraps megdnn::param::Cumsum.
class Cumsum : public OpDefImplBase<Cumsum> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
// INT32_MAX sentinel default — presumably means "axis not yet set"; confirm
// against the generator/param defs.
int32_t axis = 2147483647;
bool exclusive = true;
bool reverse = false;
Cumsum() = default;
Cumsum(int32_t axis_, bool exclusive_, bool reverse_, std::string scope_ = {}): axis(axis_), exclusive(exclusive_), reverse(reverse_) { set_scope(scope_); }
Cumsum(::megdnn::param::Cumsum packed_param_0): axis(packed_param_0.axis), exclusive(packed_param_0.exclusive), reverse(packed_param_0.reverse) {}
// Re-pack fields into the megdnn param struct.
::megdnn::param::Cumsum param() const {
return {axis, exclusive, reverse};
}
};
// Generated opdef: CvtColor — color-space conversion mode holder
// (wraps megdnn::param::CvtColor).
class CvtColor : public OpDefImplBase<CvtColor> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::CvtColor::Mode;
Mode mode = ::megdnn::param::CvtColor::Mode::RGB2GRAY;
CvtColor() = default;
CvtColor(Mode mode_, std::string scope_ = {}): mode(mode_) { set_scope(scope_); }
CvtColor(::megdnn::param::CvtColor packed_param_0): mode(packed_param_0.mode) {}
::megdnn::param::CvtColor param() const {
return {mode};
}
};
// Generated opdef: DeformableConv — deformable convolution; reuses the plain
// Convolution param struct plus the execution policy.
class DeformableConv : public OpDefImplBase<DeformableConv> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Convolution::Mode;
using Sparse = ::megdnn::param::Convolution::Sparse;
using Format = ::megdnn::param::Convolution::Format;
using ComputeMode = ::megdnn::param::Convolution::ComputeMode;
using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
Mode mode = ::megdnn::param::Convolution::Mode::CROSS_CORRELATION;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
Sparse sparse = ::megdnn::param::Convolution::Sparse::DENSE;
Format format = ::megdnn::param::Convolution::Format::NCHW;
ComputeMode compute_mode = ::megdnn::param::Convolution::ComputeMode::DEFAULT;
// Default is raw flag value 1 of the Strategy bit-flag enum (generated default).
Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
// UINT64_MAX: effectively no workspace cap.
uint64_t workspace_limit = 18446744073709551615ull;
DeformableConv() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
DeformableConv(Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, Format format_, ComputeMode compute_mode_, Strategy strategy_, uint64_t workspace_limit_, std::string scope_ = {}): mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), format(format_), compute_mode(compute_mode_), strategy(strategy_), workspace_limit(workspace_limit_) {
set_scope(scope_);
// Strategy is a bit-flag set; 8 is the generated upper bound of valid raw values.
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Construct from the packed megdnn param structs.
DeformableConv(::megdnn::param::Convolution packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1): mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), format(packed_param_0.format), compute_mode(packed_param_0.compute_mode), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit) {
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
// Re-pack convolution fields (field order matters).
::megdnn::param::Convolution param() const {
return {mode, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, sparse, format, compute_mode};
}
// Re-pack the execution-policy fields.
::megdnn::param::ExecutionPolicy policy() const {
return {strategy, workspace_limit};
}
};
// Generated opdef: DeformablePSROIPooling — wraps
// megdnn::param::DeformablePSROIPooling.
class DeformablePSROIPooling : public OpDefImplBase<DeformablePSROIPooling> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
bool no_trans = true;
float spatial_scale = 1;
float trans_std = 1;
uint32_t pooled_h = 1;
uint32_t pooled_w = 1;
uint32_t part_size = 1;
uint32_t sample_per_part = 1;
DeformablePSROIPooling() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
DeformablePSROIPooling(bool no_trans_, float spatial_scale_, float trans_std_, uint32_t pooled_h_, uint32_t pooled_w_, uint32_t part_size_, uint32_t sample_per_part_, std::string scope_ = {}): no_trans(no_trans_), spatial_scale(spatial_scale_), trans_std(trans_std_), pooled_h(pooled_h_), pooled_w(pooled_w_), part_size(part_size_), sample_per_part(sample_per_part_) { set_scope(scope_); }
// Construct from the packed megdnn param struct.
DeformablePSROIPooling(::megdnn::param::DeformablePSROIPooling packed_param_0): no_trans(packed_param_0.no_trans), spatial_scale(packed_param_0.spatial_scale), trans_std(packed_param_0.trans_std), pooled_h(packed_param_0.pooled_h), pooled_w(packed_param_0.pooled_w), part_size(packed_param_0.part_size), sample_per_part(packed_param_0.sample_per_part) {}
// Re-pack fields into the megdnn param struct (field order matters).
::megdnn::param::DeformablePSROIPooling param() const {
return {no_trans, spatial_scale, trans_std, pooled_h, pooled_w, part_size, sample_per_part};
}
};
// Generated opdef: Diag — holds the diagonal index k forwarded to
// megdnn::param::Diag.
class Diag : public OpDefImplBase<Diag> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t k = 0;
Diag() = default;
Diag(int32_t k_, std::string scope_ = {}): k(k_) { set_scope(scope_); }
Diag(::megdnn::param::Diag packed_param_0): k(packed_param_0.k) {}
::megdnn::param::Diag param() const {
return {k};
}
};
// Generated opdef: Dimshuffle — axis-permutation pattern (no megdnn param
// struct; pattern entries are axis indices, semantics defined by the kernel).
class Dimshuffle : public OpDefImplBase<Dimshuffle> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<int32_t> pattern;
Dimshuffle() = default;
Dimshuffle(std::vector<int32_t> pattern_, std::string scope_ = {}): pattern(pattern_) { set_scope(scope_); }
};
// Generated opdef: Dot — stateless op; param() returns the empty megdnn param.
class Dot : public OpDefImplBase<Dot> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
Dot() = default;
Dot(::megdnn::param::Empty) {}
::megdnn::param::Empty param() const {
return {};
}
};
// Generated opdef: Dropout — wraps megdnn::param::Dropout plus an opaque
// handle that is NOT part of the packed param (see param()).
class Dropout : public OpDefImplBase<Dropout> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
float drop_prob = 0;
uint64_t seed = 0;
// Opaque runtime handle; intentionally uninitialized by default — confirm
// callers always set it before use.
size_t handle;
Dropout() = default;
Dropout(float drop_prob_, uint64_t seed_, size_t handle_, std::string scope_ = {}): drop_prob(drop_prob_), seed(seed_), handle(handle_) { set_scope(scope_); }
Dropout(::megdnn::param::Dropout packed_param_0, size_t handle_): drop_prob(packed_param_0.drop_prob), seed(packed_param_0.seed), handle(handle_) {}
// Re-pack fields (handle excluded).
::megdnn::param::Dropout param() const {
return {drop_prob, seed};
}
};
// Generated opdef: Elemwise — elementwise operator selected by Mode
// (wraps megdnn::param::Elemwise).
class Elemwise : public OpDefImplBase<Elemwise> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Elemwise::Mode;
Mode mode = ::megdnn::param::Elemwise::Mode::RELU;
Elemwise() = default;
Elemwise(Mode mode_, std::string scope_ = {}): mode(mode_) { set_scope(scope_); }
Elemwise(::megdnn::param::Elemwise packed_param_0): mode(packed_param_0.mode) {}
::megdnn::param::Elemwise param() const {
return {mode};
}
};
  639. template <>
  640. struct ToStringTrait<Elemwise::Mode> {
  641. std::string operator()(Elemwise::Mode e) const {
  642. switch (e) {
  643. case Elemwise::Mode::RELU: return "RELU";
  644. case Elemwise::Mode::ABS: return "ABS";
  645. case Elemwise::Mode::ACOS: return "ACOS";
  646. case Elemwise::Mode::ASIN: return "ASIN";
  647. case Elemwise::Mode::CEIL: return "CEIL";
  648. case Elemwise::Mode::COS: return "COS";
  649. case Elemwise::Mode::EXP: return "EXP";
  650. case Elemwise::Mode::EXPM1: return "EXPM1";
  651. case Elemwise::Mode::FLOOR: return "FLOOR";
  652. case Elemwise::Mode::LOG: return "LOG";
  653. case Elemwise::Mode::LOG1P: return "LOG1P";
  654. case Elemwise::Mode::NEGATE: return "NEGATE";
  655. case Elemwise::Mode::SIGMOID: return "SIGMOID";
  656. case Elemwise::Mode::SIN: return "SIN";
  657. case Elemwise::Mode::TANH: return "TANH";
  658. case Elemwise::Mode::ABS_GRAD: return "ABS_GRAD";
  659. case Elemwise::Mode::ADD: return "ADD";
  660. case Elemwise::Mode::FLOOR_DIV: return "FLOOR_DIV";
  661. case Elemwise::Mode::MAX: return "MAX";
  662. case Elemwise::Mode::MIN: return "MIN";
  663. case Elemwise::Mode::MOD: return "MOD";
  664. case Elemwise::Mode::MUL: return "MUL";
  665. case Elemwise::Mode::POW: return "POW";
  666. case Elemwise::Mode::SIGMOID_GRAD: return "SIGMOID_GRAD";
  667. case Elemwise::Mode::SUB: return "SUB";
  668. case Elemwise::Mode::SWITCH_GT0: return "SWITCH_GT0";
  669. case Elemwise::Mode::TANH_GRAD: return "TANH_GRAD";
  670. case Elemwise::Mode::TRUE_DIV: return "TRUE_DIV";
  671. case Elemwise::Mode::LOG_SUM_EXP: return "LOG_SUM_EXP";
  672. case Elemwise::Mode::LT: return "LT";
  673. case Elemwise::Mode::LEQ: return "LEQ";
  674. case Elemwise::Mode::EQ: return "EQ";
  675. case Elemwise::Mode::SHL: return "SHL";
  676. case Elemwise::Mode::SHR: return "SHR";
  677. case Elemwise::Mode::COND_LEQ_MOV: return "COND_LEQ_MOV";
  678. case Elemwise::Mode::FUSE_MUL_ADD3: return "FUSE_MUL_ADD3";
  679. case Elemwise::Mode::FUSE_MUL_ADD4: return "FUSE_MUL_ADD4";
  680. case Elemwise::Mode::FUSE_ADD_RELU: return "FUSE_ADD_RELU";
  681. case Elemwise::Mode::FUSE_ADD_SIGMOID: return "FUSE_ADD_SIGMOID";
  682. case Elemwise::Mode::FUSE_ADD_TANH: return "FUSE_ADD_TANH";
  683. case Elemwise::Mode::FAST_TANH: return "FAST_TANH";
  684. case Elemwise::Mode::FAST_TANH_GRAD: return "FAST_TANH_GRAD";
  685. case Elemwise::Mode::ROUND: return "ROUND";
  686. case Elemwise::Mode::RMULH: return "RMULH";
  687. case Elemwise::Mode::ATAN2: return "ATAN2";
  688. case Elemwise::Mode::ERF: return "ERF";
  689. case Elemwise::Mode::ERFINV: return "ERFINV";
  690. case Elemwise::Mode::ERFC: return "ERFC";
  691. case Elemwise::Mode::ERFCINV: return "ERFCINV";
  692. case Elemwise::Mode::H_SWISH: return "H_SWISH";
  693. case Elemwise::Mode::H_SWISH_GRAD: return "H_SWISH_GRAD";
  694. case Elemwise::Mode::FUSE_ADD_H_SWISH: return "FUSE_ADD_H_SWISH";
  695. case Elemwise::Mode::NOT: return "NOT";
  696. case Elemwise::Mode::AND: return "AND";
  697. case Elemwise::Mode::OR: return "OR";
  698. case Elemwise::Mode::XOR: return "XOR";
  699. case Elemwise::Mode::SILU: return "SILU";
  700. case Elemwise::Mode::SILU_GRAD: return "SILU_GRAD";
  701. case Elemwise::Mode::GELU: return "GELU";
  702. case Elemwise::Mode::GELU_GRAD: return "GELU_GRAD";
  703. case Elemwise::Mode::COND_LT_MOV: return "COND_LT_MOV";
  704. case Elemwise::Mode::NEQ: return "NEQ";
  705. case Elemwise::Mode::ISNAN: return "ISNAN";
  706. case Elemwise::Mode::ISINF: return "ISINF";
  707. default:
  708. return "Elemwise::Mode::Unknown";
  709. }
  710. }
  711. };
// Generated opdef: ElemwiseMultiType — mixed-dtype elementwise operator.
// Carries a loose output DType that is NOT part of the packed param (see param()).
class ElemwiseMultiType : public OpDefImplBase<ElemwiseMultiType> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::ElemwiseMultiType::Mode;
Mode mode = ::megdnn::param::ElemwiseMultiType::Mode::FUSE_MUL_ADD3_INT16x32x32x32;
::megdnn::DType dtype;
ElemwiseMultiType() = default;
ElemwiseMultiType(Mode mode_, ::megdnn::DType dtype_, std::string scope_ = {}): mode(mode_), dtype(dtype_) { set_scope(scope_); }
ElemwiseMultiType(::megdnn::param::ElemwiseMultiType packed_param_0, ::megdnn::DType dtype_): mode(packed_param_0.mode), dtype(dtype_) {}
// Re-pack the mode (dtype intentionally excluded).
::megdnn::param::ElemwiseMultiType param() const {
return {mode};
}
};
  725. template <>
  726. struct ToStringTrait<ElemwiseMultiType::Mode> {
  727. std::string operator()(ElemwiseMultiType::Mode e) const {
  728. switch (e) {
  729. case ElemwiseMultiType::Mode::FUSE_MUL_ADD3_INT16x32x32x32: return "FUSE_MUL_ADD3_INT16x32x32x32";
  730. case ElemwiseMultiType::Mode::FUSE_MUL_ADD3_IXxF32xF32xI8: return "FUSE_MUL_ADD3_IXxF32xF32xI8";
  731. case ElemwiseMultiType::Mode::ROUND_SHR_SATURATE_IXxI8xI8: return "ROUND_SHR_SATURATE_IXxI8xI8";
  732. case ElemwiseMultiType::Mode::FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT16x16x16x8: return "FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT16x16x16x8";
  733. case ElemwiseMultiType::Mode::FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT32x32x32x8: return "FUSE_ADD_RMULH_ROUND_SHR_SATURATE_INT32x32x32x8";
  734. case ElemwiseMultiType::Mode::ROUND_SHR_SATURATE_IXxI8xI16: return "ROUND_SHR_SATURATE_IXxI8xI16";
  735. case ElemwiseMultiType::Mode::QADD: return "QADD";
  736. case ElemwiseMultiType::Mode::QFUSE_ADD_RELU: return "QFUSE_ADD_RELU";
  737. case ElemwiseMultiType::Mode::QMUL: return "QMUL";
  738. case ElemwiseMultiType::Mode::QMIN: return "QMIN";
  739. case ElemwiseMultiType::Mode::QMAX: return "QMAX";
  740. case ElemwiseMultiType::Mode::QSUB: return "QSUB";
  741. case ElemwiseMultiType::Mode::QTRUE_DIV: return "QTRUE_DIV";
  742. case ElemwiseMultiType::Mode::QFUSE_ADD_SIGMOID: return "QFUSE_ADD_SIGMOID";
  743. case ElemwiseMultiType::Mode::QFUSE_ADD_TANH: return "QFUSE_ADD_TANH";
  744. case ElemwiseMultiType::Mode::QRELU: return "QRELU";
  745. case ElemwiseMultiType::Mode::QABS: return "QABS";
  746. case ElemwiseMultiType::Mode::QSIGMOID: return "QSIGMOID";
  747. case ElemwiseMultiType::Mode::QEXP: return "QEXP";
  748. case ElemwiseMultiType::Mode::QTANH: return "QTANH";
  749. case ElemwiseMultiType::Mode::QFUSE_MUL_ADD3: return "QFUSE_MUL_ADD3";
  750. case ElemwiseMultiType::Mode::QFAST_TANH: return "QFAST_TANH";
  751. case ElemwiseMultiType::Mode::QNEGATE: return "QNEGATE";
  752. case ElemwiseMultiType::Mode::QACOS: return "QACOS";
  753. case ElemwiseMultiType::Mode::QASIN: return "QASIN";
  754. case ElemwiseMultiType::Mode::QCEIL: return "QCEIL";
  755. case ElemwiseMultiType::Mode::QCOS: return "QCOS";
  756. case ElemwiseMultiType::Mode::QEXPM1: return "QEXPM1";
  757. case ElemwiseMultiType::Mode::QFLOOR: return "QFLOOR";
  758. case ElemwiseMultiType::Mode::QLOG: return "QLOG";
  759. case ElemwiseMultiType::Mode::QLOG1P: return "QLOG1P";
  760. case ElemwiseMultiType::Mode::QSIN: return "QSIN";
  761. case ElemwiseMultiType::Mode::QROUND: return "QROUND";
  762. case ElemwiseMultiType::Mode::QERF: return "QERF";
  763. case ElemwiseMultiType::Mode::QERFINV: return "QERFINV";
  764. case ElemwiseMultiType::Mode::QERFC: return "QERFC";
  765. case ElemwiseMultiType::Mode::QERFCINV: return "QERFCINV";
  766. case ElemwiseMultiType::Mode::QABS_GRAD: return "QABS_GRAD";
  767. case ElemwiseMultiType::Mode::QFLOOR_DIV: return "QFLOOR_DIV";
  768. case ElemwiseMultiType::Mode::QMOD: return "QMOD";
  769. case ElemwiseMultiType::Mode::QSIGMOID_GRAD: return "QSIGMOID_GRAD";
  770. case ElemwiseMultiType::Mode::QSWITCH_GT0: return "QSWITCH_GT0";
  771. case ElemwiseMultiType::Mode::QTANH_GRAD: return "QTANH_GRAD";
  772. case ElemwiseMultiType::Mode::QLT: return "QLT";
  773. case ElemwiseMultiType::Mode::QLEQ: return "QLEQ";
  774. case ElemwiseMultiType::Mode::QEQ: return "QEQ";
  775. case ElemwiseMultiType::Mode::QPOW: return "QPOW";
  776. case ElemwiseMultiType::Mode::QLOG_SUM_EXP: return "QLOG_SUM_EXP";
  777. case ElemwiseMultiType::Mode::QFAST_TANH_GRAD: return "QFAST_TANH_GRAD";
  778. case ElemwiseMultiType::Mode::QATAN2: return "QATAN2";
  779. case ElemwiseMultiType::Mode::QCOND_LEQ_MOV: return "QCOND_LEQ_MOV";
  780. case ElemwiseMultiType::Mode::QH_SWISH: return "QH_SWISH";
  781. case ElemwiseMultiType::Mode::QFUSE_ADD_H_SWISH: return "QFUSE_ADD_H_SWISH";
  782. case ElemwiseMultiType::Mode::QH_SWISH_GRAD: return "QH_SWISH_GRAD";
  783. case ElemwiseMultiType::Mode::FUSE_MUL_ADD3_INT16xF32xF32xF32: return "FUSE_MUL_ADD3_INT16xF32xF32xF32";
  784. case ElemwiseMultiType::Mode::MUL_INT16xF32xF32: return "MUL_INT16xF32xF32";
  785. case ElemwiseMultiType::Mode::FUSE_MUL_ADD3_UINT8xF32xF32xF32: return "FUSE_MUL_ADD3_UINT8xF32xF32xF32";
  786. case ElemwiseMultiType::Mode::QCOND_LT_MOV: return "QCOND_LT_MOV";
  787. case ElemwiseMultiType::Mode::EQ: return "EQ";
  788. case ElemwiseMultiType::Mode::NEQ: return "NEQ";
  789. case ElemwiseMultiType::Mode::LT: return "LT";
  790. case ElemwiseMultiType::Mode::LEQ: return "LEQ";
  791. case ElemwiseMultiType::Mode::ISNAN: return "ISNAN";
  792. case ElemwiseMultiType::Mode::ISINF: return "ISINF";
  793. default:
  794. return "ElemwiseMultiType::Mode::Unknown";
  795. }
  796. }
  797. };
// Generated opdef: ExternOpr — external/custom operator descriptor: expected
// output shapes and dtypes plus an opaque data blob identified by name.
class ExternOpr : public OpDefImplBase<ExternOpr> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<std::vector<size_t>> output_shapes;
std::string name;
// Opaque payload; data_len is carried separately — presumably the meaningful
// length of data, confirm at call sites.
std::string data;
size_t data_len;
std::vector<::megdnn::DType> output_dtypes;
ExternOpr() = default;
ExternOpr(std::vector<std::vector<size_t>> output_shapes_, std::string name_, std::string data_, size_t data_len_, std::vector<::megdnn::DType> output_dtypes_, std::string scope_ = {}): output_shapes(output_shapes_), name(name_), data(data_), data_len(data_len_), output_dtypes(output_dtypes_) { set_scope(scope_); }
};
// Generated opdef: Eye — identity-matrix generator state: diagonal index k,
// output dtype (defaults to Float32) and target comp node.
class Eye : public OpDefImplBase<Eye> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t k = 0;
::megdnn::DType dtype = megdnn::DType::from_enum(megdnn::DTypeEnum::Float32);
::mgb::CompNode comp_node;
Eye() = default;
Eye(int32_t k_, ::megdnn::DType dtype_, ::mgb::CompNode comp_node_, std::string scope_ = {}): k(k_), dtype(dtype_), comp_node(comp_node_) { set_scope(scope_); }
};
// Generated opdef: FakeQuant — quantization clamp range [qmin, qmax]
// (defaults span the full int32 range); wraps megdnn::param::FakeQuant.
class FakeQuant : public OpDefImplBase<FakeQuant> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t qmin = -2147483648;
int32_t qmax = 2147483647;
FakeQuant() = default;
FakeQuant(int32_t qmin_, int32_t qmax_, std::string scope_ = {}): qmin(qmin_), qmax(qmax_) { set_scope(scope_); }
FakeQuant(::megdnn::param::FakeQuant packed_param_0): qmin(packed_param_0.qmin), qmax(packed_param_0.qmax) {}
::megdnn::param::FakeQuant param() const {
return {qmin, qmax};
}
};
// Generated opdef: FastpathCopy — stateless marker op (no parameters).
class FastpathCopy : public OpDefImplBase<FastpathCopy> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
FastpathCopy() = default;
};
// Generated opdef: GammaRNG — wraps megdnn::param::GammaRNG (seed) plus an
// opaque RNG handle that is NOT part of the packed param.
class GammaRNG : public OpDefImplBase<GammaRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint64_t seed = 0;
// Intentionally uninitialized by default — confirm callers always set it.
size_t handle;
GammaRNG() = default;
GammaRNG(uint64_t seed_, size_t handle_, std::string scope_ = {}): seed(seed_), handle(handle_) { set_scope(scope_); }
GammaRNG(::megdnn::param::GammaRNG packed_param_0, size_t handle_): seed(packed_param_0.seed), handle(handle_) {}
// Re-pack fields (handle excluded).
::megdnn::param::GammaRNG param() const {
return {seed};
}
};
// Generated opdef: GaussianRNG — normal-distribution sampler state
// (seed, mean, std, output dtype) plus an opaque RNG handle.
class GaussianRNG : public OpDefImplBase<GaussianRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint64_t seed = 0;
float mean = 0;
float std = 1;
::megdnn::DType dtype = megdnn::DType::from_enum(megdnn::DTypeEnum::Float32);
// Intentionally uninitialized by default — confirm callers always set it.
size_t handle;
GaussianRNG() = default;
GaussianRNG(uint64_t seed_, float mean_, float std_, ::megdnn::DType dtype_, size_t handle_, std::string scope_ = {}): seed(seed_), mean(mean_), std(std_), dtype(dtype_), handle(handle_) { set_scope(scope_); }
};
// Generated opdef: GetVarShape — optional axis selector
// (INVALID_AXIS sentinel means "whole shape"); wraps OptionalAxisV1.
class GetVarShape : public OpDefImplBase<GetVarShape> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis = ::megdnn::param::OptionalAxisV1::INVALID_AXIS;
GetVarShape() = default;
GetVarShape(int32_t axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
GetVarShape(::megdnn::param::OptionalAxisV1 packed_param_0): axis(packed_param_0.axis) {}
::megdnn::param::OptionalAxisV1 param() const {
return {axis};
}
};
// Generated opdef: GroupLocal — grouped locally-connected layer; reuses the
// plain Convolution param struct (no execution policy).
class GroupLocal : public OpDefImplBase<GroupLocal> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Convolution::Mode;
using Sparse = ::megdnn::param::Convolution::Sparse;
using Format = ::megdnn::param::Convolution::Format;
using ComputeMode = ::megdnn::param::Convolution::ComputeMode;
Mode mode = ::megdnn::param::Convolution::Mode::CROSS_CORRELATION;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
Sparse sparse = ::megdnn::param::Convolution::Sparse::DENSE;
Format format = ::megdnn::param::Convolution::Format::NCHW;
ComputeMode compute_mode = ::megdnn::param::Convolution::ComputeMode::DEFAULT;
GroupLocal() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
GroupLocal(Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, Sparse sparse_, Format format_, ComputeMode compute_mode_, std::string scope_ = {}): mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), sparse(sparse_), format(format_), compute_mode(compute_mode_) { set_scope(scope_); }
// Construct from the packed megdnn param struct.
GroupLocal(::megdnn::param::Convolution packed_param_0): mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), sparse(packed_param_0.sparse), format(packed_param_0.format), compute_mode(packed_param_0.compute_mode) {}
// Re-pack fields into the megdnn param struct (field order matters).
::megdnn::param::Convolution param() const {
return {mode, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, sparse, format, compute_mode};
}
};
// Generated opdef: Identity — stateless pass-through op (no parameters).
class Identity : public OpDefImplBase<Identity> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
Identity() = default;
};
// Generated opdef: Images2Neibs — sliding-window patch extraction; wraps
// megdnn::param::Images2Neibs (pad/stride/dilate/window sizes).
class Images2Neibs : public OpDefImplBase<Images2Neibs> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
uint32_t window_h = 3;
uint32_t window_w = 3;
Images2Neibs() = default;
// Field-wise constructor; scope_ names this op inside its graph scope.
Images2Neibs(uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, uint32_t window_h_, uint32_t window_w_, std::string scope_ = {}): pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), window_h(window_h_), window_w(window_w_) { set_scope(scope_); }
// Construct from the packed megdnn param struct.
Images2Neibs(::megdnn::param::Images2Neibs packed_param_0): pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), window_h(packed_param_0.window_h), window_w(packed_param_0.window_w) {}
// Re-pack fields into the megdnn param struct (field order matters).
::megdnn::param::Images2Neibs param() const {
return {pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, window_h, window_w};
}
};
// Generated opdef: IncrMeshIndexing — per-axis index descriptors; each tuple is
// presumably (axis, has_begin, has_end, has_step, is_index) — confirm against
// the opdef generator.
class IncrMeshIndexing : public OpDefImplBase<IncrMeshIndexing> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
IncrMeshIndexing() = default;
IncrMeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
// Generated opdef: IncrSubtensor — per-axis index descriptors; each tuple is
// presumably (axis, has_begin, has_end, has_step, is_index) — confirm against
// the opdef generator.
class IncrSubtensor : public OpDefImplBase<IncrSubtensor> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
IncrSubtensor() = default;
IncrSubtensor(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
// Generated opdef: IndexingIncrMultiAxisVec — per-axis index descriptors; each
// tuple is presumably (axis, has_begin, has_end, has_step, is_index) — confirm
// against the opdef generator.
class IndexingIncrMultiAxisVec : public OpDefImplBase<IndexingIncrMultiAxisVec> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
IndexingIncrMultiAxisVec() = default;
IndexingIncrMultiAxisVec(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
// Generated opdef: IndexingMultiAxisVec — per-axis index descriptors; each
// tuple is presumably (axis, has_begin, has_end, has_step, is_index) — confirm
// against the opdef generator.
class IndexingMultiAxisVec : public OpDefImplBase<IndexingMultiAxisVec> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
IndexingMultiAxisVec() = default;
IndexingMultiAxisVec(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
// Generated opdef: IndexingOneHot — axis wrapped in megdnn::param::Axis plus a
// loose ndim field that is NOT part of the packed param (see param()).
class IndexingOneHot : public OpDefImplBase<IndexingOneHot> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis = 0;
// Intentionally uninitialized by default — confirm callers always set it.
int32_t ndim;
IndexingOneHot() = default;
IndexingOneHot(int32_t axis_, int32_t ndim_, std::string scope_ = {}): axis(axis_), ndim(ndim_) { set_scope(scope_); }
IndexingOneHot(::megdnn::param::Axis packed_param_0, int32_t ndim_): axis(packed_param_0.axis), ndim(ndim_) {}
// Re-pack the axis (ndim excluded).
::megdnn::param::Axis param() const {
return {axis};
}
};
//! OpDef for IndexingSetMultiAxisVec: multi-axis vectorized indexed assignment.
class IndexingSetMultiAxisVec : public OpDefImplBase<IndexingSetMultiAxisVec> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
//! NOTE(review): each tuple appears to be (axis, has_begin, has_end, has_step, has_idx) -- confirm against the opdef generator.
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
IndexingSetMultiAxisVec() = default;
IndexingSetMultiAxisVec(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
//! OpDef for IndexingSetOneHot; param() repacks `axis` into ::megdnn::param::Axis,
//! while `ndim` is carried alongside (not part of the packed param).
class IndexingSetOneHot : public OpDefImplBase<IndexingSetOneHot> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis = 0;
int32_t ndim;  // left uninitialized by the default ctor
IndexingSetOneHot() = default;
IndexingSetOneHot(int32_t axis_, int32_t ndim_, std::string scope_ = {}): axis(axis_), ndim(ndim_) { set_scope(scope_); }
//! construct from the packed megdnn param plus the extra ndim field
IndexingSetOneHot(::megdnn::param::Axis packed_param_0, int32_t ndim_): axis(packed_param_0.axis), ndim(ndim_) {}
::megdnn::param::Axis param() const {
return {axis};
}
};
//! OpDef for InplaceAdd; stateless (packed param is ::megdnn::param::Empty).
class InplaceAdd : public OpDefImplBase<InplaceAdd> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
InplaceAdd() = default;
InplaceAdd(::megdnn::param::Empty) {}
::megdnn::param::Empty param() const {
return {};
}
};
//! OpDef for LAMBUpdate; all fields mirror ::megdnn::param::LAMBUpdate and
//! round-trip through the packed-param ctor and param().
class LAMBUpdate : public OpDefImplBase<LAMBUpdate> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
float beta_1 = 1.f;
float beta_2 = 1.f;
float step = 1.f;
float lr = 1.f;
float weight_decay = 1.f;
float eps = 1.f;
bool bias_correction = true;
bool always_adapt = false;
LAMBUpdate() = default;
LAMBUpdate(float beta_1_, float beta_2_, float step_, float lr_, float weight_decay_, float eps_, bool bias_correction_, bool always_adapt_, std::string scope_ = {}): beta_1(beta_1_), beta_2(beta_2_), step(step_), lr(lr_), weight_decay(weight_decay_), eps(eps_), bias_correction(bias_correction_), always_adapt(always_adapt_) { set_scope(scope_); }
LAMBUpdate(::megdnn::param::LAMBUpdate packed_param_0): beta_1(packed_param_0.beta_1), beta_2(packed_param_0.beta_2), step(packed_param_0.step), lr(packed_param_0.lr), weight_decay(packed_param_0.weight_decay), eps(packed_param_0.eps), bias_correction(packed_param_0.bias_correction), always_adapt(packed_param_0.always_adapt) {}
::megdnn::param::LAMBUpdate param() const {
return {beta_1, beta_2, step, lr, weight_decay, eps, bias_correction, always_adapt};
}
};
//! OpDef for LRN (local response normalization); fields mirror ::megdnn::param::LRN.
class LRN : public OpDefImplBase<LRN> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint32_t n = 5;
float k = 2.f;
float alpha = 1e-4f;
float beta = 0.75f;
LRN() = default;
LRN(uint32_t n_, float k_, float alpha_, float beta_, std::string scope_ = {}): n(n_), k(k_), alpha(alpha_), beta(beta_) { set_scope(scope_); }
LRN(::megdnn::param::LRN packed_param_0): n(packed_param_0.n), k(packed_param_0.k), alpha(packed_param_0.alpha), beta(packed_param_0.beta) {}
::megdnn::param::LRN param() const {
return {n, k, alpha, beta};
}
};
  1016. class LSQ : public OpDefImplBase<LSQ> {
  1017. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1018. public:
  1019. int32_t qmin = -2147483648;
  1020. int32_t qmax = 2147483647;
  1021. LSQ() = default;
  1022. LSQ(int32_t qmin_, int32_t qmax_, std::string scope_ = {}): qmin(qmin_), qmax(qmax_) { set_scope(scope_); }
  1023. LSQ(::megdnn::param::LSQ packed_param_0): qmin(packed_param_0.qmin), qmax(packed_param_0.qmax) {}
  1024. ::megdnn::param::LSQ param() const {
  1025. return {qmin, qmax};
  1026. }
  1027. };
//! OpDef for LSTM; fields mirror ::megdnn::param::LSTM and round-trip through param().
class LSTM : public OpDefImplBase<LSTM> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using FwdMode = ::megdnn::param::LSTM::FwdMode;
uint32_t num_layers = 1;
bool bidirectional = false;
bool bias = true;
uint32_t hidden_size = 128;
uint32_t proj_size = 0;
float dropout = 0.f;
FwdMode fwd_mode = ::megdnn::param::LSTM::FwdMode::TRAINING;
LSTM() = default;
LSTM(uint32_t num_layers_, bool bidirectional_, bool bias_, uint32_t hidden_size_, uint32_t proj_size_, float dropout_, FwdMode fwd_mode_, std::string scope_ = {}): num_layers(num_layers_), bidirectional(bidirectional_), bias(bias_), hidden_size(hidden_size_), proj_size(proj_size_), dropout(dropout_), fwd_mode(fwd_mode_) { set_scope(scope_); }
LSTM(::megdnn::param::LSTM packed_param_0): num_layers(packed_param_0.num_layers), bidirectional(packed_param_0.bidirectional), bias(packed_param_0.bias), hidden_size(packed_param_0.hidden_size), proj_size(packed_param_0.proj_size), dropout(packed_param_0.dropout), fwd_mode(packed_param_0.fwd_mode) {}
::megdnn::param::LSTM param() const {
return {num_layers, bidirectional, bias, hidden_size, proj_size, dropout, fwd_mode};
}
};
//! OpDef for LSTMCell; stateless (packed param is ::megdnn::param::Empty).
class LSTMCell : public OpDefImplBase<LSTMCell> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
LSTMCell() = default;
LSTMCell(::megdnn::param::Empty) {}
::megdnn::param::Empty param() const {
return {};
}
};
//! OpDef for LayerNorm; fields mirror ::megdnn::param::LayerNorm.
class LayerNorm : public OpDefImplBase<LayerNorm> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
bool affine = true;
float eps = 1e-5f;
uint64_t normalized_dim = 1;
uint64_t normalized_size = 1;
LayerNorm() = default;
LayerNorm(bool affine_, float eps_, uint64_t normalized_dim_, uint64_t normalized_size_, std::string scope_ = {}): affine(affine_), eps(eps_), normalized_dim(normalized_dim_), normalized_size(normalized_size_) { set_scope(scope_); }
LayerNorm(::megdnn::param::LayerNorm packed_param_0): affine(packed_param_0.affine), eps(packed_param_0.eps), normalized_dim(packed_param_0.normalized_dim), normalized_size(packed_param_0.normalized_size) {}
::megdnn::param::LayerNorm param() const {
return {affine, eps, normalized_dim, normalized_size};
}
};
//! OpDef for Linspace; `comp_node` is carried alongside the megdnn param and
//! is NOT included in param().
class Linspace : public OpDefImplBase<Linspace> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
bool endpoint = true;
::mgb::CompNode comp_node;
Linspace() = default;
Linspace(bool endpoint_, ::mgb::CompNode comp_node_, std::string scope_ = {}): endpoint(endpoint_), comp_node(comp_node_) { set_scope(scope_); }
Linspace(::megdnn::param::Linspace packed_param_0, ::mgb::CompNode comp_node_): endpoint(packed_param_0.endpoint), comp_node(comp_node_) {}
::megdnn::param::Linspace param() const {
return {endpoint};
}
};
//! OpDef for MagicMindRuntime: carries a serialized model buffer and its size.
class MagicMindRuntime : public OpDefImplBase<MagicMindRuntime> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::string buf;
size_t buf_size;  // left uninitialized by the default ctor
MagicMindRuntime() = default;
MagicMindRuntime(std::string buf_, size_t buf_size_, std::string scope_ = {}): buf(buf_), buf_size(buf_size_) { set_scope(scope_); }
};
//! OpDef for MatrixInverse; stateless (packed param is ::megdnn::param::Empty).
class MatrixInverse : public OpDefImplBase<MatrixInverse> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
MatrixInverse() = default;
MatrixInverse(::megdnn::param::Empty) {}
::megdnn::param::Empty param() const {
return {};
}
};
//! OpDef for MatrixMul; splits its state across two megdnn params:
//! param() -> ::megdnn::param::MatrixMul, policy() -> ::megdnn::param::ExecutionPolicy.
//! dimA/dimB are carried alongside and belong to neither packed struct.
class MatrixMul : public OpDefImplBase<MatrixMul> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using ComputeMode = ::megdnn::param::MatrixMul::ComputeMode;
using Format = ::megdnn::param::MatrixMul::Format;
using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
bool transposeA = false;
bool transposeB = false;
ComputeMode compute_mode = ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
Format format = ::megdnn::param::MatrixMul::Format::DEFAULT;
Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
uint64_t workspace_limit = 18446744073709551615ull;  // UINT64_MAX: effectively unlimited
uint32_t dimA;  // left uninitialized by the default ctor
uint32_t dimB;  // left uninitialized by the default ctor
MatrixMul() = default;
MatrixMul(bool transposeA_, bool transposeB_, ComputeMode compute_mode_, Format format_, Strategy strategy_, uint64_t workspace_limit_, uint32_t dimA_, uint32_t dimB_, std::string scope_ = {}): transposeA(transposeA_), transposeB(transposeB_), compute_mode(compute_mode_), format(format_), strategy(strategy_), workspace_limit(workspace_limit_), dimA(dimA_), dimB(dimB_) {
set_scope(scope_);
// valid strategy values are bounded by the generator; reject anything above 8
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
MatrixMul(::megdnn::param::MatrixMul packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1, uint32_t dimA_, uint32_t dimB_): transposeA(packed_param_0.transposeA), transposeB(packed_param_0.transposeB), compute_mode(packed_param_0.compute_mode), format(packed_param_0.format), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit), dimA(dimA_), dimB(dimB_) {
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
::megdnn::param::MatrixMul param() const {
return {transposeA, transposeB, compute_mode, format};
}
::megdnn::param::ExecutionPolicy policy() const {
return {strategy, workspace_limit};
}
};
//! OpDef for MeshIndexing: mesh-grid style indexing described by `items`.
class MeshIndexing : public OpDefImplBase<MeshIndexing> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
//! NOTE(review): each tuple appears to be (axis, has_begin, has_end, has_step, has_idx) -- confirm against the opdef generator.
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
MeshIndexing() = default;
MeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
//! OpDef for NMSKeep (non-maximum suppression); no packed megdnn param.
class NMSKeep : public OpDefImplBase<NMSKeep> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
float iou_thresh;  // left uninitialized by the default ctor
uint32_t max_output;  // left uninitialized by the default ctor
NMSKeep() = default;
NMSKeep(float iou_thresh_, uint32_t max_output_, std::string scope_ = {}): iou_thresh(iou_thresh_), max_output(max_output_) { set_scope(scope_); }
};
//! OpDef for NvOf (NVIDIA optical flow); `precision` mirrors ::megdnn::param::NvOf.
class NvOf : public OpDefImplBase<NvOf> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint32_t precision = 1;
NvOf() = default;
NvOf(uint32_t precision_, std::string scope_ = {}): precision(precision_) { set_scope(scope_); }
NvOf(::megdnn::param::NvOf packed_param_0): precision(packed_param_0.precision) {}
::megdnn::param::NvOf param() const {
return {precision};
}
};
//! OpDef for Padding; per-dimension front/back offsets (up to 7 dims),
//! the fill value, and the padding mode mirror ::megdnn::param::Padding.
class Padding : public OpDefImplBase<Padding> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using PaddingMode = ::megdnn::param::Padding::PaddingMode;
uint32_t front_offset_dim0 = 0;
uint32_t front_offset_dim1 = 0;
uint32_t front_offset_dim2 = 0;
uint32_t front_offset_dim3 = 0;
uint32_t front_offset_dim4 = 0;
uint32_t front_offset_dim5 = 0;
uint32_t front_offset_dim6 = 0;
uint32_t back_offset_dim0 = 0;
uint32_t back_offset_dim1 = 0;
uint32_t back_offset_dim2 = 0;
uint32_t back_offset_dim3 = 0;
uint32_t back_offset_dim4 = 0;
uint32_t back_offset_dim5 = 0;
uint32_t back_offset_dim6 = 0;
float padding_val = 0;
PaddingMode padding_mode = ::megdnn::param::Padding::PaddingMode::CONSTANT;
Padding() = default;
Padding(uint32_t front_offset_dim0_, uint32_t front_offset_dim1_, uint32_t front_offset_dim2_, uint32_t front_offset_dim3_, uint32_t front_offset_dim4_, uint32_t front_offset_dim5_, uint32_t front_offset_dim6_, uint32_t back_offset_dim0_, uint32_t back_offset_dim1_, uint32_t back_offset_dim2_, uint32_t back_offset_dim3_, uint32_t back_offset_dim4_, uint32_t back_offset_dim5_, uint32_t back_offset_dim6_, float padding_val_, PaddingMode padding_mode_, std::string scope_ = {}): front_offset_dim0(front_offset_dim0_), front_offset_dim1(front_offset_dim1_), front_offset_dim2(front_offset_dim2_), front_offset_dim3(front_offset_dim3_), front_offset_dim4(front_offset_dim4_), front_offset_dim5(front_offset_dim5_), front_offset_dim6(front_offset_dim6_), back_offset_dim0(back_offset_dim0_), back_offset_dim1(back_offset_dim1_), back_offset_dim2(back_offset_dim2_), back_offset_dim3(back_offset_dim3_), back_offset_dim4(back_offset_dim4_), back_offset_dim5(back_offset_dim5_), back_offset_dim6(back_offset_dim6_), padding_val(padding_val_), padding_mode(padding_mode_) { set_scope(scope_); }
Padding(::megdnn::param::Padding packed_param_0): front_offset_dim0(packed_param_0.front_offset_dim0), front_offset_dim1(packed_param_0.front_offset_dim1), front_offset_dim2(packed_param_0.front_offset_dim2), front_offset_dim3(packed_param_0.front_offset_dim3), front_offset_dim4(packed_param_0.front_offset_dim4), front_offset_dim5(packed_param_0.front_offset_dim5), front_offset_dim6(packed_param_0.front_offset_dim6), back_offset_dim0(packed_param_0.back_offset_dim0), back_offset_dim1(packed_param_0.back_offset_dim1), back_offset_dim2(packed_param_0.back_offset_dim2), back_offset_dim3(packed_param_0.back_offset_dim3), back_offset_dim4(packed_param_0.back_offset_dim4), back_offset_dim5(packed_param_0.back_offset_dim5), back_offset_dim6(packed_param_0.back_offset_dim6), padding_val(packed_param_0.padding_val), padding_mode(packed_param_0.padding_mode) {}
::megdnn::param::Padding param() const {
return {front_offset_dim0, front_offset_dim1, front_offset_dim2, front_offset_dim3, front_offset_dim4, front_offset_dim5, front_offset_dim6, back_offset_dim0, back_offset_dim1, back_offset_dim2, back_offset_dim3, back_offset_dim4, back_offset_dim5, back_offset_dim6, padding_val, padding_mode};
}
};
//! OpDef for ParamPackConcat: concatenates parameter tensors at the given byte/elem offsets.
class ParamPackConcat : public OpDefImplBase<ParamPackConcat> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<int32_t> offsets;
ParamPackConcat() = default;
ParamPackConcat(std::vector<int32_t> offsets_, std::string scope_ = {}): offsets(offsets_) { set_scope(scope_); }
};
//! OpDef for ParamPackSplit: splits a packed parameter tensor back into
//! tensors of the recorded `shapes` at the recorded `offsets`.
class ParamPackSplit : public OpDefImplBase<ParamPackSplit> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<int32_t> offsets;
std::vector<std::vector<size_t>> shapes;
ParamPackSplit() = default;
ParamPackSplit(std::vector<int32_t> offsets_, std::vector<std::vector<size_t>> shapes_, std::string scope_ = {}): offsets(offsets_), shapes(shapes_) { set_scope(scope_); }
};
//! OpDef for PermutationRNG; `handle` identifies the RNG handle and is not
//! part of any packed megdnn param.
class PermutationRNG : public OpDefImplBase<PermutationRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint64_t seed = 0;
::megdnn::DType dtype = megdnn::DType::from_enum(megdnn::DTypeEnum::Int32);
size_t handle;  // left uninitialized by the default ctor
PermutationRNG() = default;
PermutationRNG(uint64_t seed_, ::megdnn::DType dtype_, size_t handle_, std::string scope_ = {}): seed(seed_), dtype(dtype_), handle(handle_) { set_scope(scope_); }
};
//! OpDef for PixelShuffle; `factor` is the upscale factor.
class PixelShuffle : public OpDefImplBase<PixelShuffle> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t factor;  // left uninitialized by the default ctor
PixelShuffle() = default;
PixelShuffle(int32_t factor_, std::string scope_ = {}): factor(factor_) { set_scope(scope_); }
};
//! OpDef for PixelShuffleBackward; gradient counterpart of PixelShuffle.
class PixelShuffleBackward : public OpDefImplBase<PixelShuffleBackward> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t factor;  // left uninitialized by the default ctor
PixelShuffleBackward() = default;
PixelShuffleBackward(int32_t factor_, std::string scope_ = {}): factor(factor_) { set_scope(scope_); }
};
//! OpDef for PoissonRNG; `handle` identifies the RNG handle and is excluded
//! from param() (which packs only `seed`).
class PoissonRNG : public OpDefImplBase<PoissonRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint64_t seed = 0;
size_t handle;  // left uninitialized by the default ctor
PoissonRNG() = default;
PoissonRNG(uint64_t seed_, size_t handle_, std::string scope_ = {}): seed(seed_), handle(handle_) { set_scope(scope_); }
PoissonRNG(::megdnn::param::PoissonRNG packed_param_0, size_t handle_): seed(packed_param_0.seed), handle(handle_) {}
::megdnn::param::PoissonRNG param() const {
return {seed};
}
};
//! OpDef for Pooling; splits its state across two megdnn params:
//! param() -> ::megdnn::param::Pooling, policy() -> ::megdnn::param::ExecutionPolicy.
class Pooling : public OpDefImplBase<Pooling> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Pooling::Mode;
using Format = ::megdnn::param::Pooling::Format;
using Strategy = ::megdnn::param::ExecutionPolicy::Strategy;
Mode mode = ::megdnn::param::Pooling::Mode::MAX;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 2;
uint32_t stride_w = 2;
uint32_t window_h = 2;
uint32_t window_w = 2;
Format format = ::megdnn::param::Pooling::Format::NCHW;
Strategy strategy = static_cast<::megdnn::param::ExecutionPolicy::Strategy>(1);
uint64_t workspace_limit = 18446744073709551615ull;  // UINT64_MAX: effectively unlimited
Pooling() = default;
Pooling(Mode mode_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t window_h_, uint32_t window_w_, Format format_, Strategy strategy_, uint64_t workspace_limit_, std::string scope_ = {}): mode(mode_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), window_h(window_h_), window_w(window_w_), format(format_), strategy(strategy_), workspace_limit(workspace_limit_) {
set_scope(scope_);
// valid strategy values are bounded by the generator; reject anything above 8
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
Pooling(::megdnn::param::Pooling packed_param_0, ::megdnn::param::ExecutionPolicy packed_param_1): mode(packed_param_0.mode), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), window_h(packed_param_0.window_h), window_w(packed_param_0.window_w), format(packed_param_0.format), strategy(packed_param_1.strategy), workspace_limit(packed_param_1.workspace_limit) {
mgb_assert(static_cast<uint32_t>(strategy) <= uint32_t(8));
}
::megdnn::param::Pooling param() const {
return {mode, pad_h, pad_w, stride_h, stride_w, window_h, window_w, format};
}
::megdnn::param::ExecutionPolicy policy() const {
return {strategy, workspace_limit};
}
};
//! OpDef for RNN; fields mirror ::megdnn::param::RNN and round-trip through param().
class RNN : public OpDefImplBase<RNN> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using NonlineMode = ::megdnn::param::RNN::NonlineMode;
using FwdMode = ::megdnn::param::RNN::FwdMode;
uint32_t num_layers = 1;
bool bidirectional = false;
bool bias = true;
uint32_t hidden_size = 128;
float dropout = 0.f;
NonlineMode nonlineMode = ::megdnn::param::RNN::NonlineMode::IDENTITY;
FwdMode fwd_mode = ::megdnn::param::RNN::FwdMode::TRAINING;
RNN() = default;
RNN(uint32_t num_layers_, bool bidirectional_, bool bias_, uint32_t hidden_size_, float dropout_, NonlineMode nonlineMode_, FwdMode fwd_mode_, std::string scope_ = {}): num_layers(num_layers_), bidirectional(bidirectional_), bias(bias_), hidden_size(hidden_size_), dropout(dropout_), nonlineMode(nonlineMode_), fwd_mode(fwd_mode_) { set_scope(scope_); }
RNN(::megdnn::param::RNN packed_param_0): num_layers(packed_param_0.num_layers), bidirectional(packed_param_0.bidirectional), bias(packed_param_0.bias), hidden_size(packed_param_0.hidden_size), dropout(packed_param_0.dropout), nonlineMode(packed_param_0.nonlineMode), fwd_mode(packed_param_0.fwd_mode) {}
::megdnn::param::RNN param() const {
return {num_layers, bidirectional, bias, hidden_size, dropout, nonlineMode, fwd_mode};
}
};
//! OpDef for RNNCell; only the nonlinearity mode is configurable.
class RNNCell : public OpDefImplBase<RNNCell> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using NonlineMode = ::megdnn::param::RNNCell::NonlineMode;
NonlineMode nonlineMode = ::megdnn::param::RNNCell::NonlineMode::IDENTITY;
RNNCell() = default;
RNNCell(NonlineMode nonlineMode_, std::string scope_ = {}): nonlineMode(nonlineMode_) { set_scope(scope_); }
RNNCell(::megdnn::param::RNNCell packed_param_0): nonlineMode(packed_param_0.nonlineMode) {}
::megdnn::param::RNNCell param() const {
return {nonlineMode};
}
};
  1292. class ROIAlign : public OpDefImplBase<ROIAlign> {
  1293. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1294. public:
  1295. using Mode = ::megdnn::param::ROIAlign::Mode;
  1296. using Format = ::megdnn::param::ROIAlign::Format;
  1297. Mode mode = ::megdnn::param::ROIAlign::Mode::MAX;
  1298. Format format = ::megdnn::param::ROIAlign::Format::NCHW;
  1299. float spatial_scale = 1.0;
  1300. float offset = 0.0;
  1301. uint32_t pooled_height = 1;
  1302. uint32_t pooled_width = 1;
  1303. uint32_t sample_height = 2;
  1304. uint32_t sample_width = 2;
  1305. ROIAlign() = default;
  1306. ROIAlign(Mode mode_, Format format_, float spatial_scale_, float offset_, uint32_t pooled_height_, uint32_t pooled_width_, uint32_t sample_height_, uint32_t sample_width_, std::string scope_ = {}): mode(mode_), format(format_), spatial_scale(spatial_scale_), offset(offset_), pooled_height(pooled_height_), pooled_width(pooled_width_), sample_height(sample_height_), sample_width(sample_width_) { set_scope(scope_); }
  1307. ROIAlign(::megdnn::param::ROIAlign packed_param_0): mode(packed_param_0.mode), format(packed_param_0.format), spatial_scale(packed_param_0.spatial_scale), offset(packed_param_0.offset), pooled_height(packed_param_0.pooled_height), pooled_width(packed_param_0.pooled_width), sample_height(packed_param_0.sample_height), sample_width(packed_param_0.sample_width) {}
  1308. ::megdnn::param::ROIAlign param() const {
  1309. return {mode, format, spatial_scale, offset, pooled_height, pooled_width, sample_height, sample_width};
  1310. }
  1311. };
//! OpDef for ROIPooling; fields mirror ::megdnn::param::ROIPooling.
class ROIPooling : public OpDefImplBase<ROIPooling> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::ROIPooling::Mode;
Mode mode = ::megdnn::param::ROIPooling::Mode::MAX;
float scale = 1.f;
ROIPooling() = default;
ROIPooling(Mode mode_, float scale_, std::string scope_ = {}): mode(mode_), scale(scale_) { set_scope(scope_); }
ROIPooling(::megdnn::param::ROIPooling packed_param_0): mode(packed_param_0.mode), scale(packed_param_0.scale) {}
::megdnn::param::ROIPooling param() const {
return {mode, scale};
}
};
//! OpDef for Reduce; `keepdim` is carried alongside the megdnn param and is
//! NOT included in param() (which packs mode/axis/data_type only).
class Reduce : public OpDefImplBase<Reduce> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using Mode = ::megdnn::param::Reduce::Mode;
using DataType = ::megdnn::param::Reduce::DataType;
Mode mode = ::megdnn::param::Reduce::Mode::SUM;
int32_t axis = 2147483647;  // INT32_MAX sentinel default
DataType data_type = ::megdnn::param::Reduce::DataType::DEFAULT;
bool keepdim = true;
Reduce() = default;
Reduce(Mode mode_, int32_t axis_, DataType data_type_, bool keepdim_, std::string scope_ = {}): mode(mode_), axis(axis_), data_type(data_type_), keepdim(keepdim_) { set_scope(scope_); }
Reduce(::megdnn::param::Reduce packed_param_0, bool keepdim_): mode(packed_param_0.mode), axis(packed_param_0.axis), data_type(packed_param_0.data_type), keepdim(keepdim_) {}
::megdnn::param::Reduce param() const {
return {mode, axis, data_type};
}
};
//! OpDef for Remap; fields mirror ::megdnn::param::Remap.
class Remap : public OpDefImplBase<Remap> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using InterpolationMode = ::megdnn::param::Remap::InterpolationMode;
using BorderMode = ::megdnn::param::Remap::BorderMode;
using Format = ::megdnn::param::Remap::Format;
InterpolationMode imode = ::megdnn::param::Remap::InterpolationMode::LINEAR;
BorderMode border_type = ::megdnn::param::Remap::BorderMode::REPLICATE;
Format format = ::megdnn::param::Remap::Format::NHWC;
float scalar = 0.f;
Remap() = default;
Remap(InterpolationMode imode_, BorderMode border_type_, Format format_, float scalar_, std::string scope_ = {}): imode(imode_), border_type(border_type_), format(format_), scalar(scalar_) { set_scope(scope_); }
Remap(::megdnn::param::Remap packed_param_0): imode(packed_param_0.imode), border_type(packed_param_0.border_type), format(packed_param_0.format), scalar(packed_param_0.scalar) {}
::megdnn::param::Remap param() const {
return {imode, border_type, format, scalar};
}
};
//! OpDef for RemoteRecv: receives a tensor of the given shape/dtype from a
//! remote rank; addressing is (key, addr, port, rank_from, backend).
class RemoteRecv : public OpDefImplBase<RemoteRecv> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::string key;
std::string addr;
uint32_t port;  // left uninitialized by the default ctor
uint32_t rank_from;  // left uninitialized by the default ctor
::mgb::CompNode cn;
std::vector<int32_t> shape;
::megdnn::DType dtype;
std::string backend;
RemoteRecv() = default;
RemoteRecv(std::string key_, std::string addr_, uint32_t port_, uint32_t rank_from_, ::mgb::CompNode cn_, std::vector<int32_t> shape_, ::megdnn::DType dtype_, std::string backend_, std::string scope_ = {}): key(key_), addr(addr_), port(port_), rank_from(rank_from_), cn(cn_), shape(shape_), dtype(dtype_), backend(backend_) { set_scope(scope_); }
};
//! OpDef for RemoteSend: sends a tensor to a remote rank; addressing is
//! (key, addr, port, rank_to, backend).
class RemoteSend : public OpDefImplBase<RemoteSend> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::string key;
std::string addr;
uint32_t port;  // left uninitialized by the default ctor
uint32_t rank_to;  // left uninitialized by the default ctor
std::string backend;
RemoteSend() = default;
RemoteSend(std::string key_, std::string addr_, uint32_t port_, uint32_t rank_to_, std::string backend_, std::string scope_ = {}): key(key_), addr(addr_), port(port_), rank_to(rank_to_), backend(backend_) { set_scope(scope_); }
};
//! OpDef for RemoveAxis: squeezes the listed axes out of the input shape.
class RemoveAxis : public OpDefImplBase<RemoveAxis> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
std::vector<int32_t> axis;
RemoveAxis() = default;
RemoveAxis(std::vector<int32_t> axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
};
//! OpDef for Reshape; `axis` defaults to INVALID_AXIS (no unknown-dim axis);
//! `shape` is carried alongside and is NOT part of param().
class Reshape : public OpDefImplBase<Reshape> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis = ::megdnn::param::OptionalAxisV1::INVALID_AXIS;
std::vector<int32_t> shape;
Reshape() = default;
Reshape(int32_t axis_, std::vector<int32_t> shape_, std::string scope_ = {}): axis(axis_), shape(shape_) { set_scope(scope_); }
Reshape(::megdnn::param::OptionalAxisV1 packed_param_0, std::vector<int32_t> shape_): axis(packed_param_0.axis), shape(shape_) {}
::megdnn::param::OptionalAxisV1 param() const {
return {axis};
}
};
//! OpDef for Resize; fields mirror ::megdnn::param::Resize.
class Resize : public OpDefImplBase<Resize> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
using InterpolationMode = ::megdnn::param::Resize::InterpolationMode;
using Format = ::megdnn::param::Resize::Format;
InterpolationMode imode = ::megdnn::param::Resize::InterpolationMode::LINEAR;
Format format = ::megdnn::param::Resize::Format::NHWC;
Resize() = default;
Resize(InterpolationMode imode_, Format format_, std::string scope_ = {}): imode(imode_), format(format_) { set_scope(scope_); }
Resize(::megdnn::param::Resize packed_param_0): imode(packed_param_0.imode), format(packed_param_0.format) {}
::megdnn::param::Resize param() const {
return {imode, format};
}
};
//! OpDef for SVD; fields mirror ::megdnn::param::SVD.
class SVD : public OpDefImplBase<SVD> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
bool full_matrices = false;
bool compute_uv = true;
SVD() = default;
SVD(bool full_matrices_, bool compute_uv_, std::string scope_ = {}): full_matrices(full_matrices_), compute_uv(compute_uv_) { set_scope(scope_); }
SVD(::megdnn::param::SVD packed_param_0): full_matrices(packed_param_0.full_matrices), compute_uv(packed_param_0.compute_uv) {}
::megdnn::param::SVD param() const {
return {full_matrices, compute_uv};
}
};
//! OpDef for SetMeshIndexing: assignment counterpart of MeshIndexing.
class SetMeshIndexing : public OpDefImplBase<SetMeshIndexing> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
//! NOTE(review): each tuple appears to be (axis, has_begin, has_end, has_step, has_idx) -- confirm against the opdef generator.
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
SetMeshIndexing() = default;
SetMeshIndexing(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
//! OpDef for SetSubtensor: assignment counterpart of Subtensor.
class SetSubtensor : public OpDefImplBase<SetSubtensor> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
//! NOTE(review): each tuple appears to be (axis, has_begin, has_end, has_step, has_idx) -- confirm against the opdef generator.
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
SetSubtensor() = default;
SetSubtensor(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
//! OpDef for ShuffleRNG; `handle` identifies the RNG handle and is excluded
//! from param() (which packs only `seed`).
class ShuffleRNG : public OpDefImplBase<ShuffleRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint64_t seed = 0;
size_t handle;  // left uninitialized by the default ctor
ShuffleRNG() = default;
ShuffleRNG(uint64_t seed_, size_t handle_, std::string scope_ = {}): seed(seed_), handle(handle_) { set_scope(scope_); }
ShuffleRNG(::megdnn::param::ShuffleRNG packed_param_0, size_t handle_): seed(packed_param_0.seed), handle(handle_) {}
::megdnn::param::ShuffleRNG param() const {
return {seed};
}
};
//! OpDef for SlidingWindowTranspose; fields mirror ::megdnn::param::SlidingWindowTranspose.
class SlidingWindowTranspose : public OpDefImplBase<SlidingWindowTranspose> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
uint32_t out_h = 0;
uint32_t out_w = 0;
uint32_t pad_h = 0;
uint32_t pad_w = 0;
uint32_t stride_h = 1;
uint32_t stride_w = 1;
uint32_t dilate_h = 1;
uint32_t dilate_w = 1;
uint32_t window_h = 3;
uint32_t window_w = 3;
SlidingWindowTranspose() = default;
SlidingWindowTranspose(uint32_t out_h_, uint32_t out_w_, uint32_t pad_h_, uint32_t pad_w_, uint32_t stride_h_, uint32_t stride_w_, uint32_t dilate_h_, uint32_t dilate_w_, uint32_t window_h_, uint32_t window_w_, std::string scope_ = {}): out_h(out_h_), out_w(out_w_), pad_h(pad_h_), pad_w(pad_w_), stride_h(stride_h_), stride_w(stride_w_), dilate_h(dilate_h_), dilate_w(dilate_w_), window_h(window_h_), window_w(window_w_) { set_scope(scope_); }
SlidingWindowTranspose(::megdnn::param::SlidingWindowTranspose packed_param_0): out_h(packed_param_0.out_h), out_w(packed_param_0.out_w), pad_h(packed_param_0.pad_h), pad_w(packed_param_0.pad_w), stride_h(packed_param_0.stride_h), stride_w(packed_param_0.stride_w), dilate_h(packed_param_0.dilate_h), dilate_w(packed_param_0.dilate_w), window_h(packed_param_0.window_h), window_w(packed_param_0.window_w) {}
::megdnn::param::SlidingWindowTranspose param() const {
return {out_h, out_w, pad_h, pad_w, stride_h, stride_w, dilate_h, dilate_w, window_h, window_w};
}
};
//! OpDef for Softmax; `axis` defaults to -1 (last dimension).
class Softmax : public OpDefImplBase<Softmax> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis = -1;
Softmax() = default;
Softmax(int32_t axis_, std::string scope_ = {}): axis(axis_) { set_scope(scope_); }
Softmax(::megdnn::param::Softmax packed_param_0): axis(packed_param_0.axis) {}
::megdnn::param::Softmax param() const {
return {axis};
}
};
//! OpDef for Split; `axis`/`nsections` are carried alongside the (empty)
//! megdnn param and are NOT part of param().
class Split : public OpDefImplBase<Split> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
int32_t axis;  // left uninitialized by the default ctor
int32_t nsections;  // left uninitialized by the default ctor
Split() = default;
Split(int32_t axis_, int32_t nsections_, std::string scope_ = {}): axis(axis_), nsections(nsections_) { set_scope(scope_); }
Split(::megdnn::param::Empty, int32_t axis_, int32_t nsections_): axis(axis_), nsections(nsections_) {}
::megdnn::param::Empty param() const {
return {};
}
};
//! OpDef for Subtensor: reads the subtensor selected by `items`.
class Subtensor : public OpDefImplBase<Subtensor> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
//! NOTE(review): each tuple appears to be (axis, has_begin, has_end, has_step, has_idx) -- confirm against the opdef generator.
std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items;
Subtensor() = default;
Subtensor(std::vector<std::tuple<int8_t, bool, bool, bool, bool>> items_, std::string scope_ = {}): items(items_) { set_scope(scope_); }
};
  1504. class TQT : public OpDefImplBase<TQT> {
  1505. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1506. public:
  1507. int32_t qmin = -2147483648;
  1508. int32_t qmax = 2147483647;
  1509. TQT() = default;
  1510. TQT(int32_t qmin_, int32_t qmax_, std::string scope_ = {}): qmin(qmin_), qmax(qmax_) { set_scope(scope_); }
  1511. TQT(::megdnn::param::TQT packed_param_0): qmin(packed_param_0.qmin), qmax(packed_param_0.qmax) {}
  1512. ::megdnn::param::TQT param() const {
  1513. return {qmin, qmax};
  1514. }
  1515. };
  1516. class TensorRTRuntime : public OpDefImplBase<TensorRTRuntime> {
  1517. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1518. public:
  1519. std::string buf;
  1520. size_t buf_size;
  1521. TensorRTRuntime() = default;
  1522. TensorRTRuntime(std::string buf_, size_t buf_size_, std::string scope_ = {}): buf(buf_), buf_size(buf_size_) { set_scope(scope_); }
  1523. };
  1524. class TopK : public OpDefImplBase<TopK> {
  1525. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1526. public:
  1527. using Mode = ::megdnn::param::TopK::Mode;
  1528. Mode mode = ::megdnn::param::TopK::Mode::KTH_ONLY;
  1529. TopK() = default;
  1530. TopK(Mode mode_, std::string scope_ = {}): mode(mode_) { set_scope(scope_); }
  1531. TopK(::megdnn::param::TopK packed_param_0): mode(packed_param_0.mode) {}
  1532. ::megdnn::param::TopK param() const {
  1533. return {mode};
  1534. }
  1535. };
  1536. class TypeCvt : public OpDefImplBase<TypeCvt> {
  1537. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1538. public:
  1539. ::megdnn::DType dtype;
  1540. TypeCvt() = default;
  1541. TypeCvt(::megdnn::DType dtype_, std::string scope_ = {}): dtype(dtype_) { set_scope(scope_); }
  1542. };
  1543. class UniformRNG : public OpDefImplBase<UniformRNG> {
  1544. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1545. public:
  1546. uint64_t seed = 0;
  1547. ::megdnn::DType dtype = megdnn::DType::from_enum(megdnn::DTypeEnum::Float32);
  1548. size_t handle;
  1549. UniformRNG() = default;
  1550. UniformRNG(uint64_t seed_, ::megdnn::DType dtype_, size_t handle_, std::string scope_ = {}): seed(seed_), dtype(dtype_), handle(handle_) { set_scope(scope_); }
  1551. };
  1552. class WarpAffine : public OpDefImplBase<WarpAffine> {
  1553. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1554. public:
  1555. using InterpolationMode = ::megdnn::param::WarpAffine::InterpolationMode;
  1556. using BorderMode = ::megdnn::param::WarpAffine::BorderMode;
  1557. using Format = ::megdnn::param::WarpAffine::Format;
  1558. InterpolationMode imode = ::megdnn::param::WarpAffine::InterpolationMode::LINEAR;
  1559. BorderMode border_mode = ::megdnn::param::WarpAffine::BorderMode::REPLICATE;
  1560. float border_val = .0f;
  1561. Format format = ::megdnn::param::WarpAffine::Format::NHWC;
  1562. WarpAffine() = default;
  1563. WarpAffine(InterpolationMode imode_, BorderMode border_mode_, float border_val_, Format format_, std::string scope_ = {}): imode(imode_), border_mode(border_mode_), border_val(border_val_), format(format_) { set_scope(scope_); }
  1564. WarpAffine(::megdnn::param::WarpAffine packed_param_0): imode(packed_param_0.imode), border_mode(packed_param_0.border_mode), border_val(packed_param_0.border_val), format(packed_param_0.format) {}
  1565. ::megdnn::param::WarpAffine param() const {
  1566. return {imode, border_mode, border_val, format};
  1567. }
  1568. };
  1569. class WarpPerspective : public OpDefImplBase<WarpPerspective> {
  1570. MGB_DYN_TYPE_OBJ_FINAL_DECL;
  1571. public:
  1572. using InterpolationMode = ::megdnn::param::WarpPerspective::InterpolationMode;
  1573. using BorderMode = ::megdnn::param::WarpPerspective::BorderMode;
  1574. using Format = ::megdnn::param::WarpPerspective::Format;
  1575. InterpolationMode imode = ::megdnn::param::WarpPerspective::InterpolationMode::LINEAR;
  1576. BorderMode bmode = ::megdnn::param::WarpPerspective::BorderMode::REPLICATE;
  1577. Format format = ::megdnn::param::WarpPerspective::Format::NCHW;
  1578. float border_val = .0f;
  1579. WarpPerspective() = default;
  1580. WarpPerspective(InterpolationMode imode_, BorderMode bmode_, Format format_, float border_val_, std::string scope_ = {}): imode(imode_), bmode(bmode_), format(format_), border_val(border_val_) { set_scope(scope_); }
  1581. WarpPerspective(::megdnn::param::WarpPerspective packed_param_0): imode(packed_param_0.imode), bmode(packed_param_0.bmode), format(packed_param_0.format), border_val(packed_param_0.border_val) {}
  1582. ::megdnn::param::WarpPerspective param() const {
  1583. return {imode, bmode, format, border_val};
  1584. }
  1585. };
  1586. // clang-format on