You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; can include dashes ('-'); and can be up to 35 characters long.

conv_bias_multi_thread.cpp 142 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396
  1. /**
  2. * \file dnn/test/arm_common/conv_bias_multi_thread.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
  10. * implied.
  11. */
  12. #include "megdnn/dtype.h"
  13. #include "test/arm_common/fixture.h"
  14. #include "test/common/benchmarker.h"
  15. #include "test/common/conv_bias.h"
  16. #include "test/arm_common/cpuinfo_help.h"
  17. using namespace megdnn;
  18. using namespace test;
  19. using namespace conv_bias;
  20. std::vector<conv_bias::TestArg> get_int8_quint8_conv_bias_args(
  21. std::vector<size_t> kernel, size_t stride, bool no_pad, bool no_bias,
  22. bool no_nonlinemode) {
  23. using namespace conv_bias;
  24. using Param = param::ConvBias;
  25. using NLMode = param::ConvBias::NonlineMode;
  26. std::vector<TestArg> args;
  27. auto pack = [&](size_t n, size_t oc, size_t ic, size_t w, size_t h,
  28. size_t kernel, size_t stride, NLMode nlmode) {
  29. Param param;
  30. param.stride_h = stride;
  31. param.stride_w = stride;
  32. if (!no_pad) {
  33. param.pad_h = kernel / 2;
  34. param.pad_w = kernel / 2;
  35. } else {
  36. param.pad_h = 0;
  37. param.pad_w = 0;
  38. }
  39. param.nonlineMode = nlmode;
  40. args.emplace_back(param, TensorShape{n, ic, h, w},
  41. TensorShape{oc, ic, kernel, kernel}, TensorShape{});
  42. if (!no_bias) {
  43. args.emplace_back(param, TensorShape{n, ic, h, w},
  44. TensorShape{oc, ic, kernel, kernel},
  45. TensorShape{1, oc, 1, 1});
  46. }
  47. };
  48. std::vector<NLMode> nonlinemode = {NLMode::IDENTITY};
  49. if (!no_nonlinemode) {
  50. nonlinemode.emplace_back(NLMode::RELU);
  51. nonlinemode.emplace_back(NLMode::H_SWISH);
  52. }
  53. for (size_t n : {1, 2}) {
  54. for (auto nlmode : nonlinemode) {
  55. for (size_t ic : {1, 3, 7}) {
  56. for (size_t oc : {1, 3, 7}) {
  57. for (size_t size : {4, 6, 8, 14, 16, 18}) {
  58. for (size_t kern : kernel) {
  59. pack(n, oc, ic, size, size, kern, stride, nlmode);
  60. }
  61. }
  62. }
  63. }
  64. }
  65. }
  66. return args;
  67. }
  68. std::vector<conv_bias::TestArg> get_nchw44_conv_bias_args(
  69. std::vector<size_t> kernel_vec, size_t stride, bool no_pad = false,
  70. bool no_bias = false, bool no_nonlinemode = false,
  71. bool is_input_nchw = false, bool is_nchw44_dot = false,
  72. bool support_full_bias = false, bool support_sigmoid = false,
  73. bool only_no_bias = false) {
  74. using namespace conv_bias;
  75. using NLMode = param::ConvBias::NonlineMode;
  76. std::vector<TestArg> args;
  77. MEGDNN_MARK_USED_VAR(no_pad);
  78. auto pack = [&](size_t n, size_t oc, size_t ic, size_t h, size_t w,
  79. size_t kernel, size_t stride, size_t group, NLMode nlmode,
  80. megdnn::BiasMode bias_mode, int any_pad = -1) {
  81. constexpr int pack_c = 4;
  82. const size_t pad = any_pad >= 0 ? any_pad : kernel / 2;
  83. auto oc_per_group = oc / group;
  84. auto ic_per_group = ic / group;
  85. bool ok_group = (oc % group == 0 && ic % group == 0) &&
  86. oc_per_group % pack_c == 0 && oc_per_group > 0 &&
  87. ic_per_group > 0;
  88. bool nchw_disable = group > 1 || ic_per_group >= 4;
  89. bool nchw44_disable = ic_per_group % pack_c != 0;
  90. bool invalid_pad = (w + 2 * pad < kernel) || (h + 2 * pad < kernel);
  91. if (!(ok_group) || invalid_pad) {
  92. return;
  93. }
  94. if ((is_input_nchw && nchw_disable) ||
  95. (!is_input_nchw && nchw44_disable)) {
  96. return;
  97. }
  98. size_t kernel_h = kernel;
  99. size_t kernel_w = kernel;
  100. param::ConvBias param;
  101. if (!is_nchw44_dot) {
  102. param.format = param::ConvBias::Format::NCHW44;
  103. } else {
  104. param.format = param::ConvBias::Format::NCHW44_DOT;
  105. }
  106. param.stride_h = stride;
  107. param.stride_w = stride;
  108. param.pad_h = pad;
  109. param.pad_w = pad;
  110. param.nonlineMode = nlmode;
  111. auto src_tensor_shape = TensorShape{n, ic / pack_c, h, w, pack_c};
  112. auto weight_tensor_shape = TensorShape{
  113. oc / pack_c, ic / pack_c, kernel_h, kernel_w, pack_c, pack_c};
  114. auto bias_tensor_shape = TensorShape{};
  115. if (bias_mode == megdnn::BiasMode::BROADCAST_CHANNEL_BIAS) {
  116. bias_tensor_shape = {1, oc / pack_c, 1, 1, pack_c};
  117. } else if (bias_mode == megdnn::BiasMode::BIAS) {
  118. bias_tensor_shape = {n, oc / pack_c,
  119. (h + 2 * pad - kernel) / stride + 1,
  120. (w + 2 * pad - kernel) / stride + 1, pack_c};
  121. }
  122. if (group == 1) {
  123. param.sparse = param::ConvBias::Sparse::DENSE;
  124. } else if (group > 1 && ic / group == 1 && oc / group == 1) {
  125. megdnn_assert(0, "not support channel wise");
  126. param.sparse = param::ConvBias::Sparse::GROUP;
  127. weight_tensor_shape = TensorShape{group / pack_c, 1, 1,
  128. kernel_h, kernel_w, pack_c};
  129. } else if (group > 1 && oc_per_group % pack_c == 0 && oc / group > 0 &&
  130. ic_per_group % pack_c == 0 && ic / group > 0) {
  131. param.sparse = param::ConvBias::Sparse::GROUP;
  132. weight_tensor_shape = TensorShape{group,
  133. oc_per_group / pack_c,
  134. ic_per_group / pack_c,
  135. kernel_h,
  136. kernel_w,
  137. pack_c,
  138. pack_c};
  139. }
  140. if (is_input_nchw) {
  141. src_tensor_shape = TensorShape{n, ic, h, w};
  142. weight_tensor_shape =
  143. TensorShape{oc / pack_c, kernel_h, kernel_w, ic, pack_c};
  144. }
  145. args.emplace_back(param, src_tensor_shape, weight_tensor_shape,
  146. bias_tensor_shape);
  147. };
  148. std::vector<NLMode> nonlinemode = {NLMode::IDENTITY};
  149. if (!no_nonlinemode) {
  150. nonlinemode.emplace_back(NLMode::RELU);
  151. nonlinemode.emplace_back(NLMode::H_SWISH);
  152. }
  153. if (support_sigmoid) {
  154. nonlinemode.emplace_back(NLMode::SIGMOID);
  155. }
  156. std::vector<megdnn::BiasMode> bias_mode;
  157. if (!only_no_bias) {
  158. bias_mode.emplace_back(megdnn::BiasMode::BROADCAST_CHANNEL_BIAS);
  159. if (no_bias) {
  160. bias_mode.emplace_back(megdnn::BiasMode::NO_BIAS);
  161. }
  162. } else {
  163. bias_mode.emplace_back(megdnn::BiasMode::NO_BIAS);
  164. }
  165. if (support_full_bias) {
  166. bias_mode.emplace_back(megdnn::BiasMode::BIAS);
  167. }
  168. for (auto bias : bias_mode)
  169. for (auto nlmode : nonlinemode)
  170. for (size_t n : {1, 2})
  171. for (size_t kernel : kernel_vec)
  172. for (size_t oc : {4, 12})
  173. for (size_t ic : {1, 3, 4, 12})
  174. for (size_t h : {1, 3, 12})
  175. for (size_t w : {1, 16, 23}) {
  176. for (size_t group = 1;
  177. group <=
  178. std::min(std::min(oc, ic), 4_z);
  179. ++group) {
  180. if (kernel != 1 && (h == 1 || w == 1)) {
  181. continue;
  182. }
  183. pack(n, oc, ic, h, w, kernel, stride,
  184. group, nlmode, bias);
  185. }
  186. }
  187. return args;
  188. }
  189. std::vector<conv_bias::TestArg> get_nchw44_channel_wise_args(
  190. std::vector<size_t> kernel, size_t stride, bool no_bias,
  191. bool no_nonlinemode, bool no_full_bias) {
  192. using namespace conv_bias;
  193. using Param = param::ConvBias;
  194. using NLMode = param::ConvBias::NonlineMode;
  195. std::vector<TestArg> args;
  196. auto pack = [&](size_t n, size_t group, size_t w, size_t h, size_t kernel,
  197. size_t stride, NLMode nlmode, bool pad) {
  198. Param param;
  199. param.stride_h = stride;
  200. param.stride_w = stride;
  201. if (pad) {
  202. param.pad_h = kernel / 2;
  203. param.pad_w = kernel / 2;
  204. } else {
  205. param.pad_h = 0;
  206. param.pad_w = 0;
  207. }
  208. param.nonlineMode = nlmode;
  209. param.format = param::ConvBias::Format::NCHW44;
  210. param.sparse = param::ConvBias::Sparse::GROUP;
  211. args.emplace_back(param, TensorShape{n, group, h, w, 4},
  212. TensorShape{group, 1, 1, kernel, kernel, 4},
  213. TensorShape{});
  214. if (!no_bias) {
  215. args.emplace_back(param, TensorShape{n, group, h, w, 4},
  216. TensorShape{group, 1, 1, kernel, kernel, 4},
  217. TensorShape{1, group, 1, 1, 4});
  218. }
  219. if (!no_full_bias) {
  220. args.emplace_back(
  221. param, TensorShape{n, group, h, w, 4},
  222. TensorShape{group, 1, 1, kernel, kernel, 4},
  223. TensorShape{n, group,
  224. (h + 2 * param.pad_w - kernel) / stride + 1,
  225. (w + 2 * param.pad_w - kernel) / stride + 1,
  226. 4});
  227. }
  228. };
  229. std::vector<NLMode> nonlinemode = {NLMode::IDENTITY};
  230. if (!no_nonlinemode) {
  231. nonlinemode.emplace_back(NLMode::RELU);
  232. nonlinemode.emplace_back(NLMode::H_SWISH);
  233. }
  234. for (size_t n : {1, 2}) {
  235. for (auto nlmode : nonlinemode) {
  236. for (bool pad : {true}) {
  237. for (size_t group : {1, 2, 4, 7, 128}) {
  238. for (size_t size : {4, 6, 7, 9, 15, 40}) {
  239. for (size_t kern : kernel) {
  240. pack(n, group, size, size, kern, stride, nlmode,
  241. pad);
  242. }
  243. }
  244. }
  245. }
  246. for (bool pad : {false}) {
  247. for (size_t group : {1, 2, 7, 128}) {
  248. for (size_t size : {7, 9, 15, 40}) {
  249. for (size_t kern : kernel) {
  250. pack(n, group, size, size, kern, stride, nlmode,
  251. pad);
  252. }
  253. }
  254. }
  255. }
  256. }
  257. }
  258. return args;
  259. }
  260. void checker_conv_bias_qint8x8x8(std::vector<conv_bias::TestArg> args,
  261. Handle* handle, const char* algo_name) {
  262. Checker<ConvBias> checker(handle);
  263. checker.set_before_exec_callback(
  264. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  265. #if MEGDNN_ARMV7
  266. checker.set_epsilon(1);
  267. #endif
  268. UniformIntRNG rng{-50, 50};
  269. checker.set_dtype(0, dtype::QuantizedS8(0.41113496f))
  270. .set_dtype(1, dtype::QuantizedS8(0.01887994f))
  271. .set_dtype(2, dtype::QuantizedS32(0.41113496f * 0.01887994f))
  272. .set_dtype(4, dtype::QuantizedS8(0.49550694f))
  273. .set_rng(0, &rng)
  274. .set_rng(1, &rng)
  275. .set_rng(2, &rng);
  276. for (auto&& arg : args) {
  277. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  278. }
  279. }
  280. void checker_conv_bias_qint8x8x32(std::vector<conv_bias::TestArg> args,
  281. Handle* handle, const char* algo_name) {
  282. Checker<ConvBias> checker(handle);
  283. UniformIntRNG rng{-50, 50};
  284. checker.set_dtype(0, dtype::QuantizedS8(2.5f))
  285. .set_dtype(1, dtype::QuantizedS8(2.5f))
  286. .set_dtype(2, dtype::QuantizedS32(6.25f))
  287. .set_dtype(4, {});
  288. checker.set_before_exec_callback(
  289. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  290. for (auto&& arg : args) {
  291. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  292. }
  293. }
  294. void checker_conv_bias_quint8x8x8(std::vector<conv_bias::TestArg> args,
  295. Handle* handle, const char* algo_name) {
  296. Checker<ConvBias> checker(handle);
  297. checker.set_before_exec_callback(
  298. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  299. UniformIntRNG rng(0, 255);
  300. checker.set_dtype(0, dtype::Quantized8Asymm(0.2f, 100))
  301. .set_dtype(1, dtype::Quantized8Asymm(0.2f, 120))
  302. .set_dtype(2, dtype::QuantizedS32(0.04f))
  303. .set_dtype(4, dtype::Quantized8Asymm(1.4f, 110))
  304. .set_rng(0, &rng)
  305. .set_rng(1, &rng)
  306. .set_rng(2, &rng);
  307. for (auto&& arg : args) {
  308. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  309. }
  310. }
  311. void checker_conv_bias_quint8x8x32(std::vector<conv_bias::TestArg> args,
  312. Handle* handle, const char* algo_name) {
  313. Checker<ConvBias> checker(handle);
  314. checker.set_before_exec_callback(
  315. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  316. NormalRNG rng(128.f);
  317. checker.set_rng(0, &rng).set_rng(1, &rng);
  318. checker.set_dtype(0, dtype::Quantized8Asymm(1.2f, (uint8_t)127))
  319. .set_dtype(1, dtype::Quantized8Asymm(1.3f, (uint8_t)129))
  320. .set_dtype(2, dtype::QuantizedS32(1.2 * 1.3))
  321. .set_dtype(4, {});
  322. for (auto&& arg : args) {
  323. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  324. }
  325. }
  326. void checker_conv_bias_int8x8x32_multi(std::vector<conv_bias::TestArg> args,
  327. Handle* handle, const char* algo_name) {
  328. Checker<ConvBias> checker(handle);
  329. checker.set_before_exec_callback(
  330. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  331. checker.set_dtype(0, dtype::Int8());
  332. checker.set_dtype(1, dtype::Int8());
  333. checker.set_dtype(2, dtype::Int32());
  334. checker.set_dtype(4, dtype::Int32());
  335. for (auto&& arg : args) {
  336. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  337. }
  338. }
  339. /**********************************F32 direct************************/
  340. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32) {
  341. check_conv_bias(
  342. get_conv_bias_args({1, 2, 3, 4, 5, 6, 7}, 1, false, false, false),
  343. handle(), "F32DIRECT");
  344. }
  345. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_NCHW44_S1_K7) {
  346. check_conv_bias(get_nchw44_conv_bias_args({7}, 1, false, true, true, false,
  347. false, false),
  348. handle(), "F32_CONV_NCHW44_DIRECT");
  349. }
  350. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_NCHW44_S1_K2K3) {
  351. check_conv_bias(get_nchw44_conv_bias_args({2, 3}, 1, false, false, false,
  352. false, false, true, true),
  353. handle(), "F32_CONV_NCHW44_DIRECT");
  354. }
  355. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_NCHW44_S1_K5) {
  356. check_conv_bias(get_nchw44_conv_bias_args({5}, 1, false, false, false,
  357. false, false, true, true),
  358. handle(), "F32_CONV_NCHW44_DIRECT");
  359. }
  360. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_NCHW44_S2) {
  361. check_conv_bias(get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false,
  362. false, false, false, true, true),
  363. handle(), "F32_CONV_NCHW44_DIRECT");
  364. }
  365. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_STR1) {
  366. check_conv_bias(get_conv_bias_args({2, 3, 5, 7}, 1, false, false, false),
  367. handle(), "F32STRD1");
  368. }
  369. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP32_STR2) {
  370. check_conv_bias(get_conv_bias_args({2, 3, 5, 7}, 2, false, false, false),
  371. handle(), "F32STRD2");
  372. }
  373. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_NCHW_NCHW44_F32_S2) {
  374. check_conv_bias(get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false,
  375. false, true),
  376. handle(), "F32_CONV_NCHW_NCHW44");
  377. }
  378. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_NCHW_NCHW44_F32_S1) {
  379. check_conv_bias(get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false,
  380. false, true),
  381. handle(), "F32_CONV_NCHW_NCHW44");
  382. }
  383. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_CHANNEL_WISE_STRIDE1_FP32_NCHW44_1) {
  384. check_conv_bias(
  385. get_nchw44_channel_wise_args({2, 3}, 1, false, false, false),
  386. handle(), "F32_CHANNEL_WISE_NCHW44");
  387. }
  388. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_CHANNEL_WISE_STRIDE1_FP32_NCHW44_2) {
  389. check_conv_bias(get_nchw44_channel_wise_args({5}, 1, false, false, false),
  390. handle(), "F32_CHANNEL_WISE_NCHW44");
  391. }
  392. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_CHANNEL_WISE_STRIDE2_FP32_NCHW44) {
  393. check_conv_bias(
  394. get_nchw44_channel_wise_args({2, 3, 5}, 2, false, false, false),
  395. handle(), "F32_CHANNEL_WISE_NCHW44");
  396. }
  397. /**********************************F16 direct************************/
  398. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  399. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP16) {
  400. NormalRNG rng(1);
  401. checker_conv_bias_f16(
  402. get_conv_bias_args({1, 2, 3, 4, 5, 6, 7}, 1, false, false, false),
  403. handle(), rng, "F16DIRECT", 0.03);
  404. }
  405. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_DIRECT_FP16_STR1) {
  406. NormalRNG rng(1);
  407. checker_conv_bias_f16(get_conv_bias_args({2, 3, 5}, 1, false, false, false),
  408. handle(), rng, "F16STRD1", 0.03);
  409. }
  410. #endif
  411. /**********************************algo 8816 direct************************/
  412. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT16_DIRECT) {
  413. checker_conv_bias_int8x8x16(
  414. get_conv_bias_args({2, 3, 5}, 1, false, true, true), handle(),
  415. "I8816DIRECT");
  416. }
  417. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT16_STRIDE2) {
  418. checker_conv_bias_int8x8x16(
  419. get_conv_bias_args({2, 3, 5}, 2, false, true, true), handle(),
  420. "I8816STRD2");
  421. }
  422. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT16_NCHW_NCHW44_S2) {
  423. checker_conv_bias_int8x8x16(
  424. get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, true,
  425. true),
  426. handle(), "I8816_CONV_NCHW_NCHW44");
  427. }
  428. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT16_NCHW_NCHW44_S1) {
  429. checker_conv_bias_int8x8x16(
  430. get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, true,
  431. true),
  432. handle(), "I8816_CONV_NCHW_NCHW44");
  433. }
  434. /**********************************algo 8-8-32 direct************************/
  435. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT32_STRIDE1) {
  436. checker_conv_bias_int8x8x32_multi(
  437. get_conv_bias_args({2, 3, 5, 7}, 1, false, true, true), handle(),
  438. "S8STRD1");
  439. }
  440. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_INT8_INT32_STRIDE2) {
  441. checker_conv_bias_int8x8x32_multi(
  442. get_conv_bias_args({2, 3, 5, 7}, 2, false, true, true), handle(),
  443. "S8STRD2");
  444. }
  445. TEST_F(ARM_COMMON_MULTI_THREADS,
  446. CONV_BIAS_INT8_INT8_INT32_CHANNEL_WISE_DIRECT1_NCHW44) {
  447. checker_conv_bias_int8x8x32_multi(
  448. get_nchw44_channel_wise_args({2, 3, 5}, 1, false, true, true),
  449. handle(), "S8_CHAN_WISE_STRD1_NCHW44");
  450. }
  451. TEST_F(ARM_COMMON_MULTI_THREADS,
  452. CONV_BIAS_INT8_INT8_INT32_CHANNEL_WISE_DIRECT2_NCHW44) {
  453. checker_conv_bias_int8x8x32_multi(
  454. get_nchw44_channel_wise_args({2, 3, 5}, 2, false, true, true),
  455. handle(), "S8_CHAN_WISE_STRD2_NCHW44");
  456. }
  457. TEST_F(ARM_COMMON, CONV_BIAS_INT8_INT8_INT16_CHANNEL_WISE_DIRECT1_NCHW44) {
  458. Checker<ConvBias> checker(handle());
  459. checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
  460. "S8x8x16_CHAN_WISE_STRD1_STRD2_NCHW44"));
  461. checker.set_dtype(0, dtype::Int8());
  462. checker.set_dtype(1, dtype::Int8());
  463. checker.set_dtype(2, dtype::Int16());
  464. checker.set_dtype(4, dtype::Int16());
  465. auto args = get_nchw44_channel_wise_args({2, 3, 5}, 1, false, true, true);
  466. for (auto&& arg : args) {
  467. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  468. }
  469. }
  470. TEST_F(ARM_COMMON_MULTI_THREADS,
  471. CONV_BIAS_INT8_INT8_INT16_CHANNEL_WISE_DIRECT2_NCHW44) {
  472. Checker<ConvBias> checker(handle());
  473. checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
  474. "S8x8x16_CHAN_WISE_STRD1_STRD2_NCHW44"));
  475. checker.set_dtype(0, dtype::Int8());
  476. checker.set_dtype(1, dtype::Int8());
  477. checker.set_dtype(2, dtype::Int16());
  478. checker.set_dtype(4, dtype::Int16());
  479. auto args = get_nchw44_channel_wise_args({2, 3, 5}, 2, false, true, true);
  480. for (auto&& arg : args) {
  481. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  482. }
  483. }
  484. /********************************qint8 direct******************************/
//! quantized int8x8x8 direct conv, stride 1, algo "S8STRD1".
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1) {
    checker_conv_bias_qint8x8x8(get_int8_quint8_conv_bias_args(
                                        {2, 3, 5, 7}, 1, false, false, false),
                                handle(), "S8STRD1");
}
//! quantized int8x8x8 direct conv, stride 2, algo "S8STRD2".
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2) {
    checker_conv_bias_qint8x8x8(get_int8_quint8_conv_bias_args(
                                        {2, 3, 5, 7}, 2, false, false, false),
                                handle(), "S8STRD2");
}
//! quantized int8x8x8 NCHW44 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1_NCHW44) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, false),
            handle(), "S8_NCHW44_DIRECT");
}
//! quantized int8x8x32 NCHW44 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1_NCHW44_8832) {
    checker_conv_bias_qint8x8x32(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, true),
            handle(), "S8_NCHW44_DIRECT");
}
//! quantized int8x8x32 NCHW44 direct conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_NCHW44_8832) {
    checker_conv_bias_qint8x8x32(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, true),
            handle(), "S8_NCHW44_DIRECT");
}
//! quantized int8x8x8 NCHW44 direct conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_NCHW44) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, false),
            handle(), "S8_NCHW44_DIRECT");
}
//! quantized int8x8x8 channel-wise direct conv, stride 1, NCHW44 layout.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QS8_CHANNEL_WISE_DIRECT1_NCHW44) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_channel_wise_args({2, 3, 5}, 1, false, false, true),
            handle(), "S8_CHAN_WISE_STRD1_NCHW44");
}
//! quantized int8x8x8 channel-wise direct conv, stride 2, NCHW44 layout.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QS8_CHANNEL_WISE_DIRECT2_NCHW44) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_channel_wise_args({2, 3, 5}, 2, false, false, true),
            handle(), "S8_CHAN_WISE_STRD2_NCHW44");
}
//! quantized int8x8x8 hybrid NCHW input -> NCHW44 output conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_NCHW_NCHW44_S1) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, false,
                                      true),
            handle(), "S8_CONV_NCHW_NCHW44");
}
//! quantized int8x8x8 hybrid NCHW input -> NCHW44 output conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_NCHW_NCHW44_S2) {
    checker_conv_bias_qint8x8x8(
            get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, false,
                                      true),
            handle(), "S8_CONV_NCHW_NCHW44");
}
  537. /*****************************quint8 direct****************************/
//! quantized uint8 direct conv, stride 1, algo "QU8STRD1".
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QUINT8_STRIDE1) {
    checker_conv_bias_quint8x8x8(get_int8_quint8_conv_bias_args(
                                         {2, 3, 5, 7}, 1, false, false, false),
                                 handle(), "QU8STRD1");
}
//! quantized uint8 direct conv, stride 2, algo "QU8STRD2".
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QUINT8_STRIDE2) {
    checker_conv_bias_quint8x8x8(get_int8_quint8_conv_bias_args(
                                         {2, 3, 5, 7}, 2, false, false, false),
                                 handle(), "QU8STRD2");
}
  548. /****************************dot qint8 direct*************************/
  549. #if __ARM_FEATURE_DOTPROD
  550. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_DOT_NCHW_NCHW44) {
  551. auto args = get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, false, false,
  552. true);
  553. for (auto&& arg : args) {
  554. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  555. }
  556. checker_conv_bias_qint8x8x8(args, handle(), "ARMDOTS8_NCHW_NCHW44");
  557. args = get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, false, false,
  558. true);
  559. for (auto&& arg : args) {
  560. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  561. }
  562. checker_conv_bias_qint8x8x8(args, handle(), "ARMDOTS8_NCHW_NCHW44");
  563. }
//! dot-product quantized int8x8x8 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE1_WITHDOTPROD) {
    checker_conv_bias_qint8x8x8(get_int8_quint8_conv_bias_args(
                                        {2, 3, 5, 7}, 1, false, false, false),
                                handle(), "ARMDOTS8STRD1");
}
//! dot-product quantized int8x8x8 direct conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_INT8_STRIDE2_WITHDOTPROD) {
    checker_conv_bias_qint8x8x8(get_int8_quint8_conv_bias_args(
                                        {2, 3, 5, 7}, 2, false, false, false),
                                handle(), "ARMDOTS8STRD2");
}
  574. /****************************dot 8-8-32 direct*************************/
//! dot-product quantized int8x8x32 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_I8832STRD1_WITHDOT) {
    checker_conv_bias_qint8x8x32(
            get_conv_bias_args({2, 3, 5, 7}, 1, false, true, true), handle(),
            "ARMDOTS8STRD1");
}
//! dot-product quantized int8x8x32 direct conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_I8832STRD2_WITHDOT) {
    checker_conv_bias_qint8x8x32(
            get_conv_bias_args({2, 3, 5, 7}, 2, false, true, true), handle(),
            "ARMDOTS8STRD2");
}
  585. /******************************dot quint8*****************************/
//! dot-product quantized uint8 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QUINT8_STRIDE1_WITHDOTPROD) {
    checker_conv_bias_quint8x8x8(get_int8_quint8_conv_bias_args(
                                         {2, 3, 5, 7}, 1, false, false, false),
                                 handle(), "ARMDOTU8STRD1");
}
//! TODO: kernel size 3 is deliberately not tested here; enabling it
//! currently causes a bus error on armv7
//! dot-product quantized uint8 direct conv, stride 2 (kernel 3 excluded,
//! see TODO above this test).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_QUINT8_STRIDE2_WITHDOTPROD) {
    checker_conv_bias_quint8x8x8(
            get_int8_quint8_conv_bias_args({2, 5, 7}, 2, false, false, false),
            handle(), "ARMDOTU8STRD2");
}
  598. /******************************dot quint8x8x32***********************/
//! dot-product quantized uint8 -> int32 direct conv, stride 1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_QUINT8_DIRECT_STRIDE1) {
    checker_conv_bias_quint8x8x32(
            get_conv_bias_args({2, 3, 5, 7}, 1, false, true, true), handle(),
            "ARMDOTU8STRD1");
}
//! dot-product quantized uint8 -> int32 direct conv, stride 2.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_QUINT8_DIRECT_STRIDE2) {
    checker_conv_bias_quint8x8x32(
            get_conv_bias_args({2, 3, 5, 7}, 2, false, true, true), handle(),
            "ARMDOTU8STRD2");
}
  609. /******************************dot int8x8x8 nchw44 ***********************/
  610. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S1_Q8x8x8) {
  611. using namespace conv_bias;
  612. std::vector<TestArg> args = get_nchw44_conv_bias_args({2, 3, 5, 7}, 1);
  613. for (auto&& arg : args)
  614. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  615. checker_conv_bias_qint8x8x8(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  616. }
  617. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S1_Q8x8x32) {
  618. using namespace conv_bias;
  619. std::vector<TestArg> args =
  620. get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, true, true);
  621. for (auto&& arg : args)
  622. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  623. checker_conv_bias_qint8x8x32(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  624. }
  625. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S1_8x8x32) {
  626. using namespace conv_bias;
  627. std::vector<TestArg> args =
  628. get_nchw44_conv_bias_args({2, 3, 5, 7}, 1, false, true, true);
  629. for (auto&& arg : args)
  630. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  631. checker_conv_bias_int8x8x32_multi(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  632. }
  633. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S2_Q8x8x8) {
  634. using namespace conv_bias;
  635. //! test qint8x8x8
  636. std::vector<TestArg> args = get_nchw44_conv_bias_args({2, 3, 5, 7}, 2);
  637. for (auto&& arg : args)
  638. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  639. checker_conv_bias_qint8x8x8(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  640. }
  641. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S2_Q8x8x32) {
  642. using namespace conv_bias;
  643. //! test qint8x8x8
  644. std::vector<TestArg> args =
  645. get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, true, true);
  646. for (auto&& arg : args)
  647. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  648. checker_conv_bias_qint8x8x32(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  649. }
  650. TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_INT8_DIRECT_DOT_NCHW44_S2_8x8x32) {
  651. using namespace conv_bias;
  652. //! test qint8x8x8
  653. std::vector<TestArg> args =
  654. get_nchw44_conv_bias_args({2, 3, 5, 7}, 2, false, true, true);
  655. for (auto&& arg : args)
  656. arg.param.format = param::ConvBias::Format::NCHW44_DOT;
  657. checker_conv_bias_int8x8x32_multi(args, handle(), "ARMDOTS8DIRECT_NCHW44");
  658. }
  659. #endif
//! winograd F(2,3), MK4 packed matmul, tile 32.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F23_4) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_mk_packed_args();
    Checker<ConvBiasForward> checker(handle());
    check_winograd("4:2:32", checker, args, param::MatrixMul::Format::MK4);
}
//! winograd F(2,3) MK4 with the weight-preprocess execution path.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F23_4_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_mk_packed_args();
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("4:2:32", checker, args, param::MatrixMul::Format::MK4);
}
//! winograd F(2,3) MK4 on NCHW44 layout.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F23_4_NCHW44) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("4:2:32", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
//! winograd F(2,3) MK4 on NCHW44 layout with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_F23_4_NCHW44_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("4:2:32", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
//! winograd F(6,3), default (non-packed) matmul format.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F63) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(3);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("1:6:32", checker, args);
}
//! winograd F(6,3) with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F63_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(3);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("1:6:32", checker, args);
}
//! winograd F(6,3), MK4 packed matmul, tile 16.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F63_4) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_mk_packed_args();
    Checker<ConvBiasForward> checker(handle());
    check_winograd("4:6:16", checker, args, param::MatrixMul::Format::MK4);
}
//! winograd F(6,3) MK4 with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F63_4_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_mk_packed_args();
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("4:6:16", checker, args, param::MatrixMul::Format::MK4);
}
//! winograd F(6,3) MK4 on NCHW44 layout.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F63_4_NCHW44) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("4:6:16", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
//! winograd F(6,3) MK4 on NCHW44 layout with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_F63_4_NCHW44_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("4:6:16", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
  731. //! uncomment it when low precision mode is ok
  732. #if 0
//! winograd F(7,3) MK4 on NCHW44; disabled via the surrounding `#if 0`
//! until low-precision mode is acceptable (see comment above the guard).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F73_4_NCHW44) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("4:7:16", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
//! weight-preprocess variant of the disabled F(7,3) test above.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F73_4_NCHW44_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("4:7:16", checker, args, param::MatrixMul::Format::MK4,
                   param::ConvBias::Format::NCHW44);
}
  748. #endif
//! winograd F(5,4).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F54) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(4);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("1:5:32", checker, args);
}
//! winograd F(5,4) with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F54_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(4);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("1:5:32", checker, args);
}
//! winograd F(4,5).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F45) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(5);
    Checker<ConvBiasForward> checker(handle());
    check_winograd("1:4:32", checker, args);
}
//! winograd F(4,5) with weight preprocess.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F45_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    std::vector<TestArg> args = get_winograd_args(5);
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    check_winograd("1:4:32", checker, args);
}
  775. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD) {
  776. using namespace conv_bias;
  777. std::vector<TestArg> args = get_winograd_args(3);
  778. Checker<ConvBiasForward> checker(handle());
  779. auto extra_impl = [](const TensorNDArray& tensors, uint32_t m,
  780. param::ConvBias param, Handle* handle) {
  781. megdnn_assert(param.format == param::ConvBias::Format::NCHW);
  782. auto winograd_preprocess_opr =
  783. handle->create_operator<WinogradFilterPreprocess>();
  784. winograd_preprocess_opr->param().output_block_size = m;
  785. TensorLayout filter_transform_layout;
  786. winograd_preprocess_opr->deduce_layout(tensors[1].layout,
  787. filter_transform_layout);
  788. size_t winograd_preprocess_workspace_in_bytes =
  789. winograd_preprocess_opr->get_workspace_in_bytes(
  790. tensors[1].layout, filter_transform_layout);
  791. auto conv_bias_opr = handle->create_operator<ConvBias>();
  792. conv_bias_opr->param() = param;
  793. conv_bias_opr->param().format = param::ConvBias::Format::NCHW_WINOGRAD;
  794. conv_bias_opr->param().output_block_size = m;
  795. size_t conv_bias_workspace_in_bytes =
  796. conv_bias_opr->get_workspace_in_bytes(
  797. tensors[0].layout, filter_transform_layout,
  798. tensors[2].layout, tensors[3].layout, tensors[4].layout,
  799. nullptr);
  800. WorkspaceBundle wb(nullptr, {filter_transform_layout.span().dist_byte(),
  801. conv_bias_workspace_in_bytes,
  802. winograd_preprocess_workspace_in_bytes});
  803. wb.set(malloc(wb.total_size_in_bytes()));
  804. TensorND filter_transform_tensor(wb.get(0),
  805. std::move(filter_transform_layout));
  806. winograd_preprocess_opr->exec(tensors[1], filter_transform_tensor,
  807. wb.get_workspace(2));
  808. conv_bias_opr->exec(tensors[0], filter_transform_tensor, tensors[2],
  809. tensors[3], tensors[4], nullptr,
  810. wb.get_workspace(1));
  811. free(wb.ptr());
  812. };
  813. auto run = [&checker, &extra_impl](
  814. Handle* handle, const std::vector<TestArg>& args,
  815. const std::vector<size_t>& out_size, DType A_dtype,
  816. DType B_dtype, DType C_dtype, DType D_dtype,
  817. const float eps) {
  818. for (auto&& arg : args) {
  819. for (uint32_t m : out_size) {
  820. checker.set_extra_opr_impl(std::bind(extra_impl,
  821. std::placeholders::_1, m,
  822. arg.param, handle));
  823. checker.set_dtype(0, A_dtype)
  824. .set_dtype(1, B_dtype)
  825. .set_dtype(2, C_dtype)
  826. .set_dtype(4, D_dtype)
  827. .set_epsilon(eps)
  828. .set_param(arg.param)
  829. .execs({arg.src, arg.filter, arg.bias, {}, {}});
  830. }
  831. }
  832. };
  833. run(handle(), args, {6}, dtype::Float32(), dtype::Float32(),
  834. dtype::Float32(), dtype::Float32(), 1e-3f);
  835. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  836. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  837. checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng);
  838. run(handle(), args, {6}, dtype::Float16(), dtype::Float16(),
  839. dtype::Float16(), dtype::Float16(), 0.35f);
  840. #endif
  841. }
//! NCHW44 variant of the winograd-vs-reference check: the reference impl
//! runs WinogradFilterPreprocess (MK4 format) and then ConvBias in
//! NCHW44_WINOGRAD format with the transformed filter.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_PREPROCESS_NCHW44) {
    using namespace conv_bias;
    std::vector<TestArg> nchw44_args = get_nchw44_conv_bias_args({3}, 1);
    Checker<ConvBiasForward> checker(handle());
    auto extra_impl = [](const TensorNDArray& tensors, uint32_t m,
                         param::ConvBias param, Handle* handle) {
        megdnn_assert(param.format == param::ConvBias::Format::NCHW44);
        auto winograd_preprocess_opr =
                handle->create_operator<WinogradFilterPreprocess>();
        winograd_preprocess_opr->param().output_block_size = m;
        winograd_preprocess_opr->param().format = param::MatrixMul::Format::MK4;
        TensorLayout filter_transform_layout;
        winograd_preprocess_opr->deduce_layout(tensors[1].layout,
                                               filter_transform_layout);
        size_t winograd_preprocess_workspace_in_bytes =
                winograd_preprocess_opr->get_workspace_in_bytes(
                        tensors[1].layout, filter_transform_layout);
        auto conv_bias_opr = handle->create_operator<ConvBias>();
        conv_bias_opr->param() = param;
        conv_bias_opr->param().format =
                param::ConvBias::Format::NCHW44_WINOGRAD;
        conv_bias_opr->param().output_block_size = m;
        size_t conv_bias_workspace_in_bytes =
                conv_bias_opr->get_workspace_in_bytes(
                        tensors[0].layout, filter_transform_layout,
                        tensors[2].layout, tensors[3].layout, tensors[4].layout,
                        nullptr);
        //! single malloc backs three regions: [transformed filter |
        //! conv workspace | preprocess workspace]; freed below.
        WorkspaceBundle wb(nullptr, {filter_transform_layout.span().dist_byte(),
                                     conv_bias_workspace_in_bytes,
                                     winograd_preprocess_workspace_in_bytes});
        wb.set(malloc(wb.total_size_in_bytes()));
        TensorND filter_transform_tensor(wb.get(0),
                                         std::move(filter_transform_layout));
        winograd_preprocess_opr->exec(tensors[1], filter_transform_tensor,
                                      wb.get_workspace(2));
        conv_bias_opr->exec(tensors[0], filter_transform_tensor, tensors[2],
                            tensors[3], tensors[4], nullptr,
                            wb.get_workspace(1));
        free(wb.ptr());
    };
    //! run every arg with output_block_size m against the reference impl.
    auto run = [&checker, &extra_impl](
                       Handle* handle, const std::vector<TestArg>& args,
                       const std::vector<size_t>& out_size, DType A_dtype,
                       DType B_dtype, DType C_dtype, DType D_dtype,
                       const float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(extra_impl,
                                                     std::placeholders::_1, m,
                                                     arg.param, handle));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    //! uncomment this when low precision mode is ok
    // run(handle(), nchw44_args, {2, 6, 7}, dtype::Float32(), dtype::Float32(),
    //     dtype::Float32(), dtype::Float32(), 1e-2f);
    //! remove this when low precision mode is ok
    run(handle(), nchw44_args, {2, 6}, dtype::Float32(), dtype::Float32(),
        dtype::Float32(), dtype::Float32(), 1e-3f);
}
//! winograd MK4 F32 check via winograd_algo_extra_impl; the arg list is
//! split in half across two tests (this is the first half) to bound the
//! runtime of a single test case.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_MK_PACKED_F32_1) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    std::vector<TestArg> args = get_winograd_mk_packed_args(8);
    std::vector<TestArg> args_first_half(args.begin(),
                                         args.begin() + args.size() / 2);
    run(handle(), args_first_half, {2, 6}, dtype::Float32{}, dtype::Float32{},
        dtype::Float32{}, dtype::Float32{}, param::MatrixMul::Format::MK4,
        1e-3f);
}
//! weight-preprocess variant of MK_PACKED_F32_1 (first half of the args).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_MK_PACKED_F32_1_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    std::vector<TestArg> args = get_winograd_mk_packed_args(8);
    std::vector<TestArg> args_first_half(args.begin(),
                                         args.begin() + args.size() / 2);
    run(handle(), args_first_half, {2, 6}, dtype::Float32{}, dtype::Float32{},
        dtype::Float32{}, dtype::Float32{}, param::MatrixMul::Format::MK4,
        1e-3f);
}
//! winograd MK4 F32 check, second half of the args (see MK_PACKED_F32_1).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_MK_PACKED_F32_2) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    std::vector<TestArg> args = get_winograd_mk_packed_args(8);
    std::vector<TestArg> args_second_half(args.begin() + args.size() / 2,
                                          args.end());
    run(handle(), args_second_half, {2, 6}, dtype::Float32{}, dtype::Float32{},
        dtype::Float32{}, dtype::Float32{}, param::MatrixMul::Format::MK4,
        1e-3f);
}
//! weight-preprocess variant of MK_PACKED_F32_2 (second half of the args).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_MK_PACKED_F32_2_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    std::vector<TestArg> args = get_winograd_mk_packed_args(8);
    std::vector<TestArg> args_second_half(args.begin() + args.size() / 2,
                                          args.end());
    run(handle(), args_second_half, {2, 6}, dtype::Float32{}, dtype::Float32{},
        dtype::Float32{}, dtype::Float32{}, param::MatrixMul::Format::MK4,
        1e-3f);
}
  1029. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  1030. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_MK_PACKED_F16) {
  1031. using namespace conv_bias;
  1032. Checker<ConvBiasForward> checker(handle());
  1033. auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
  1034. const std::vector<size_t>& out_size, DType A_dtype,
  1035. DType B_dtype, DType C_dtype, DType D_dtype,
  1036. param::MatrixMul::Format format, float eps) {
  1037. for (auto&& arg : args) {
  1038. for (uint32_t m : out_size) {
  1039. checker.set_extra_opr_impl(std::bind(
  1040. winograd_algo_extra_impl, std::placeholders::_1, m,
  1041. arg.param, handle, format));
  1042. checker.set_dtype(0, A_dtype)
  1043. .set_dtype(1, B_dtype)
  1044. .set_dtype(2, C_dtype)
  1045. .set_dtype(4, D_dtype)
  1046. .set_epsilon(eps)
  1047. .set_param(arg.param)
  1048. .execs({arg.src, arg.filter, arg.bias, {}, {}});
  1049. }
  1050. }
  1051. };
  1052. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1053. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1054. checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng);
  1055. run(handle(), args, {2}, dtype::Float16{}, dtype::Float16{},
  1056. dtype::Float16{}, dtype::Float16{}, param::MatrixMul::Format::MK8,
  1057. 0.25);
  1058. }
  1059. TEST_F(ARM_COMMON_MULTI_THREADS,
  1060. CONV_BIAS_WINOGRAD_MK_PACKED_F16_WEIGHT_PREPROCESS) {
  1061. using namespace conv_bias;
  1062. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1063. handle());
  1064. auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
  1065. const std::vector<size_t>& out_size, DType A_dtype,
  1066. DType B_dtype, DType C_dtype, DType D_dtype,
  1067. param::MatrixMul::Format format, float eps) {
  1068. for (auto&& arg : args) {
  1069. for (uint32_t m : out_size) {
  1070. checker.set_extra_opr_impl(std::bind(
  1071. winograd_algo_extra_impl, std::placeholders::_1, m,
  1072. arg.param, handle, format));
  1073. checker.set_dtype(0, A_dtype)
  1074. .set_dtype(1, B_dtype)
  1075. .set_dtype(2, C_dtype)
  1076. .set_dtype(4, D_dtype)
  1077. .set_epsilon(eps)
  1078. .set_param(arg.param)
  1079. .execs({arg.src, arg.filter, arg.bias, {}, {}});
  1080. }
  1081. }
  1082. };
  1083. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1084. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1085. checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng);
  1086. run(handle(), args, {2}, dtype::Float16{}, dtype::Float16{},
  1087. dtype::Float16{}, dtype::Float16{}, param::MatrixMul::Format::MK8,
  1088. 0.25);
  1089. }
  1090. #endif
//! quantized int8 winograd F(2,3) on the MK8 int16x16x32 matmul kernel;
//! the algo name is pinned per-arch via the before-exec callback.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_MK_PACKED_INT8) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD:%s:8:2:32", matmul_name).c_str()));
    std::vector<TestArg> quantized_args =
            get_quantized_winograd_mk_packed_args(8);
    //! bounded int values keep the int32 accumulation within range.
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
//! quantized int8 winograd F(2,3) on NCHW44 layout (MK8 matmul kernel).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:8:2:32", matmul_name).c_str()));
    std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4);
    //! bounded int values keep the int32 accumulation within range.
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
//! group-mode variant of the NCHW44 int8 winograd test (grouped conv args).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPMODE) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:8:2:32", matmul_name).c_str()));
    std::vector<TestArg> quantized_args =
            get_int8_nchw44_args(3, 4, false, true);
    //! bounded int values keep the int32 accumulation within range.
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    float epsilon = 0.001;
//! int8 input computed through an fp32 MK4 matmul (per the F32_MK4 names)
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
    const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
    //! 3rd argument true selects the fp32-compute testcase set
    std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4, true);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
        dtype::QuantizedS8(0.01887994f),
        dtype::QuantizedS32(0.41113496f * 0.01887994f),
        dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
        epsilon);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32_GROUPMODE) {
    using namespace conv_bias;
    Checker<ConvBiasForward> checker(handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    float epsilon = 0.001;
//! int8 input computed through an fp32 MK4 matmul (per the F32_MK4 names)
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
    const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
    //! fp32-compute testcases in group mode (3rd and 4th arguments true)
    std::vector<TestArg> quantized_args =
            get_int8_nchw44_args(3, 4, true, true);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
        dtype::QuantizedS8(0.01887994f),
        dtype::QuantizedS32(0.41113496f * 0.01887994f),
        dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
        epsilon);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_MK_PACKED_INT8_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    //! OprWeightPreprocessProxy routes execution through the
    //! weight-preprocess path of ConvBiasForward.
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
//! ISA-specific int16x16x32 MK8 matmul backing the winograd algorithm
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    //! fail the test unless this exact WINOGRAD algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD:%s:8:2:32", matmul_name).c_str()));
    std::vector<TestArg> quantized_args =
            get_quantized_winograd_mk_packed_args(8);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    //! OprWeightPreprocessProxy routes execution through the
    //! weight-preprocess path of ConvBiasForward.
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
//! ISA-specific int16x16x32 MK8 matmul backing the winograd algorithm
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:8:2:32", matmul_name).c_str()));
    std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_GROUPMODE_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    //! OprWeightPreprocessProxy routes execution through the
    //! weight-preprocess path of ConvBiasForward.
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
//! ISA-specific int16x16x32 MK8 matmul backing the winograd algorithm
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_INT16X16X32_MK8_8X8";
#else
    const char* matmul_name = "ARMV7_INT16X16X32_MK8_4X8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:8:2:32", matmul_name).c_str()));
    //! group-mode testcases (4th argument true)
    std::vector<TestArg> quantized_args =
            get_int8_nchw44_args(3, 4, false, true);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(2.5f),
        dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),
        dtype::QuantizedS8(60.25f), param::MatrixMul::Format::MK8, 1e-3);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    //! OprWeightPreprocessProxy routes execution through the
    //! weight-preprocess path of ConvBiasForward.
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    float epsilon = 0.001;
//! int8 input computed through an fp32 MK4 matmul (per the F32_MK4 names)
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
    const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
    //! 3rd argument true selects the fp32-compute testcase set
    std::vector<TestArg> quantized_args = get_int8_nchw44_args(3, 4, true);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
        dtype::QuantizedS8(0.01887994f),
        dtype::QuantizedS32(0.41113496f * 0.01887994f),
        dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
        epsilon);
}
TEST_F(ARM_COMMON_MULTI_THREADS,
       WINOGRAD_NCHW44_MK_PACKED_INT8_COMP_F32_GROUPMODE_WEIGHT_PREPROCESS) {
    using namespace conv_bias;
    //! OprWeightPreprocessProxy routes execution through the
    //! weight-preprocess path of ConvBiasForward.
    Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
            handle());
    //! run: for every testcase and every winograd output tile size `m`,
    //! install winograd_algo_extra_impl (bound with m/param/handle/format)
    //! as the checker's reference implementation, then execute with the
    //! given dtypes (tensor 0 = src, 1 = filter, 2 = bias, 4 = dst).
    auto run = [&checker](Handle* handle, const std::vector<TestArg>& args,
                          const std::vector<size_t>& out_size, DType A_dtype,
                          DType B_dtype, DType C_dtype, DType D_dtype,
                          param::MatrixMul::Format format, float eps) {
        for (auto&& arg : args) {
            for (uint32_t m : out_size) {
                checker.set_extra_opr_impl(std::bind(
                        winograd_algo_extra_impl, std::placeholders::_1, m,
                        arg.param, handle, format));
                checker.set_dtype(0, A_dtype)
                        .set_dtype(1, B_dtype)
                        .set_dtype(2, C_dtype)
                        .set_dtype(4, D_dtype)
                        .set_epsilon(eps)
                        .set_param(arg.param)
                        .execs({arg.src, arg.filter, arg.bias, {}, {}});
            }
        }
    };
    float epsilon = 0.001;
//! int8 input computed through an fp32 MK4 matmul (per the F32_MK4 names)
#if MEGDNN_AARCH64
    const char* matmul_name = "AARCH64_F32_MK4_4x16";
#else
    const char* matmul_name = "ARMV7_F32_MK4_4x8";
#endif
    //! fail the test unless this exact WINOGRAD_NCHW44 algorithm is chosen
    checker.set_before_exec_callback(conv_bias::ConvBiasAlgoChecker<ConvBias>(
            ssprintf("WINOGRAD_NCHW44:%s:4:2:32", matmul_name).c_str()));
    //! fp32-compute testcases in group mode (3rd and 4th arguments true)
    std::vector<TestArg> quantized_args =
            get_int8_nchw44_args(3, 4, true, true);
    UniformIntRNG int_rng{-50, 50};
    checker.set_rng(0, &int_rng).set_rng(1, &int_rng).set_rng(2, &int_rng);
    run(handle(), quantized_args, {2}, dtype::QuantizedS8(0.41113496f),
        dtype::QuantizedS8(0.01887994f),
        dtype::QuantizedS32(0.41113496f * 0.01887994f),
        dtype::QuantizedS8(0.49550694f), param::MatrixMul::Format::MK4,
        epsilon);
}
  1482. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  1483. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F23) {
  1484. using namespace conv_bias;
  1485. std::vector<TestArg> args = get_winograd_mk_packed_args();
  1486. Checker<ConvBiasForward> checker(handle());
  1487. check_winograd_fp16("1:2:32", checker, args, NULL, 0.08);
  1488. }
  1489. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F45_1) {
  1490. using namespace conv_bias;
  1491. std::vector<TestArg> args = get_winograd_args(5);
  1492. std::vector<TestArg> args_head_half(args.begin(),
  1493. args.begin() + args.size() / 2);
  1494. Checker<ConvBiasForward> checker(handle());
  1495. //! fp16 range -1.0 ~ 1.0
  1496. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1497. check_winograd_fp16("1:4:32", checker, args_head_half, rng, 0.25);
  1498. }
  1499. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F45_2) {
  1500. using namespace conv_bias;
  1501. std::vector<TestArg> args = get_winograd_args(5);
  1502. std::vector<TestArg> args_back_half(args.begin() + args.size() / 2,
  1503. args.end());
  1504. Checker<ConvBiasForward> checker(handle());
  1505. //! fp16 range -1.0 ~ 1.0
  1506. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1507. check_winograd_fp16("1:4:32", checker, args_back_half, rng, 0.25);
  1508. }
//! FIXME: This test may fail when the whole `ARM_COMMON.CONV_BIAS_WINOGRAD*`
//! group is run together, but it passes when run as a single testcase.
  1511. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F63) {
  1512. using namespace conv_bias;
  1513. std::vector<TestArg> args = get_winograd_args(3);
  1514. Checker<ConvBiasForward> checker(handle());
  1515. //! fp16 range -1.0 ~ 1.0
  1516. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1517. check_winograd_fp16("1:6:32", checker, args, rng, 0.3);
  1518. }
  1519. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_8x8_1) {
  1520. using namespace conv_bias;
  1521. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1522. std::vector<TestArg> args_head_half(args.begin(),
  1523. args.begin() + args.size() / 2);
  1524. Checker<ConvBiasForward> checker(handle());
  1525. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1526. check_winograd_fp16("8:2:32", checker, args_head_half, rng, 0.25,
  1527. param::MatrixMul::Format::MK8);
  1528. }
  1529. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_8x8_2) {
  1530. using namespace conv_bias;
  1531. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1532. std::vector<TestArg> args_back_half(args.begin() + args.size() / 2,
  1533. args.end());
  1534. Checker<ConvBiasForward> checker(handle());
  1535. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1536. check_winograd_fp16("8:2:32", checker, args_back_half, rng, 0.25,
  1537. param::MatrixMul::Format::MK8);
  1538. }
  1539. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F23_WEIGHT_PREPROCESS) {
  1540. using namespace conv_bias;
  1541. std::vector<TestArg> args = get_winograd_mk_packed_args();
  1542. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1543. handle());
  1544. check_winograd_fp16("1:2:32", checker, args, NULL, 0.08);
  1545. }
  1546. TEST_F(ARM_COMMON_MULTI_THREADS,
  1547. CONV_BIAS_WINOGRAD_F16_F45_1_WEIGHT_PREPROCESS) {
  1548. using namespace conv_bias;
  1549. std::vector<TestArg> args = get_winograd_args(5);
  1550. std::vector<TestArg> args_head_half(args.begin(),
  1551. args.begin() + args.size() / 2);
  1552. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1553. handle());
  1554. //! fp16 range -1.0 ~ 1.0
  1555. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1556. check_winograd_fp16("1:4:32", checker, args_head_half, rng, 0.25);
  1557. }
  1558. TEST_F(ARM_COMMON_MULTI_THREADS,
  1559. CONV_BIAS_WINOGRAD_F16_F45_2_WEIGHT_PREPROCESS) {
  1560. using namespace conv_bias;
  1561. std::vector<TestArg> args = get_winograd_args(5);
  1562. std::vector<TestArg> args_back_half(args.begin() + args.size() / 2,
  1563. args.end());
  1564. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1565. handle());
  1566. //! fp16 range -1.0 ~ 1.0
  1567. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1568. check_winograd_fp16("1:4:32", checker, args_back_half, rng, 0.25);
  1569. }
//! FIXME: This test may fail when the whole `ARM_COMMON.CONV_BIAS_WINOGRAD*`
//! group is run together, but it passes when run as a single testcase.
  1572. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_F16_F63_WEIGHT_PREPROCESS) {
  1573. using namespace conv_bias;
  1574. std::vector<TestArg> args = get_winograd_args(3);
  1575. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1576. handle());
  1577. //! fp16 range -1.0 ~ 1.0
  1578. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1579. check_winograd_fp16("1:6:32", checker, args, rng, 0.3);
  1580. }
  1581. TEST_F(ARM_COMMON_MULTI_THREADS,
  1582. CONV_BIAS_WINOGRAD_F16_8x8_1_WEIGHT_PREPROCESS) {
  1583. using namespace conv_bias;
  1584. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1585. std::vector<TestArg> args_head_half(args.begin(),
  1586. args.begin() + args.size() / 2);
  1587. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1588. handle());
  1589. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1590. check_winograd_fp16("8:2:32", checker, args_head_half, rng, 0.25,
  1591. param::MatrixMul::Format::MK8);
  1592. }
  1593. TEST_F(ARM_COMMON_MULTI_THREADS,
  1594. CONV_BIAS_WINOGRAD_F16_8x8_2_WEIGHT_PREPROCESS) {
  1595. using namespace conv_bias;
  1596. std::vector<TestArg> args = get_winograd_mk_packed_args(8);
  1597. std::vector<TestArg> args_back_half(args.begin() + args.size() / 2,
  1598. args.end());
  1599. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1600. handle());
  1601. Float16PeriodicalRNG* rng = new Float16PeriodicalRNG(0x3c00);
  1602. check_winograd_fp16("8:2:32", checker, args_back_half, rng, 0.25,
  1603. param::MatrixMul::Format::MK8);
  1604. }
  1605. #endif
  1606. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_WINOGRAD_INT8_8X8) {
  1607. using namespace conv_bias;
  1608. std::vector<TestArg> args = get_quantized_winograd_mk_packed_args(8);
  1609. Checker<ConvBiasForward> checker(handle());
  1610. UniformIntRNG rng{-50, 50};
  1611. checker.set_dtype(0, dtype::QuantizedS8(2.5f))
  1612. .set_dtype(1, dtype::QuantizedS8(2.5f))
  1613. .set_dtype(2, dtype::QuantizedS32(6.25f))
  1614. .set_dtype(4, dtype::QuantizedS8(60.25f))
  1615. .set_rng(0, &rng)
  1616. .set_rng(1, &rng)
  1617. .set_rng(2, &rng);
  1618. check_winograd("8:2:32", checker, args, param::MatrixMul::Format::MK8);
  1619. }
  1620. TEST_F(ARM_COMMON_MULTI_THREADS,
  1621. CONV_BIAS_WINOGRAD_INT8_8X8_WEIGHT_PREPROCESS) {
  1622. using namespace conv_bias;
  1623. std::vector<TestArg> args = get_quantized_winograd_mk_packed_args(8);
  1624. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  1625. handle());
  1626. UniformIntRNG rng{-50, 50};
  1627. checker.set_dtype(0, dtype::QuantizedS8(2.5f))
  1628. .set_dtype(1, dtype::QuantizedS8(2.5f))
  1629. .set_dtype(2, dtype::QuantizedS32(6.25f))
  1630. .set_dtype(4, dtype::QuantizedS8(60.25f))
  1631. .set_rng(0, &rng)
  1632. .set_rng(1, &rng)
  1633. .set_rng(2, &rng);
  1634. check_winograd("8:2:32", checker, args, param::MatrixMul::Format::MK8);
  1635. }
  1636. void checker_conv_bias(std::vector<conv_bias::TestArg> args, Handle* handle,
  1637. RNG* rng, float epsilon, DType type0, DType type1,
  1638. DType type2, DType type3, const char* algo_name) {
  1639. using namespace conv_bias;
  1640. Checker<ConvBias> checker(handle);
  1641. checker.set_before_exec_callback(
  1642. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  1643. checker.set_dtype(0, type0);
  1644. checker.set_dtype(1, type1);
  1645. checker.set_dtype(2, type2);
  1646. checker.set_dtype(4, type3);
  1647. checker.set_epsilon(epsilon);
  1648. if (NULL != rng) {
  1649. checker.set_rng(0, rng).set_rng(1, rng).set_rng(2, rng).set_rng(3, rng);
  1650. }
  1651. for (auto&& arg : args) {
  1652. checker.set_param(arg.param).execs(
  1653. {arg.src, arg.filter, arg.bias, {}, {}});
  1654. }
  1655. }
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE2_PREPROCESS) {
//! cb(name): run the stride-2 fp32 im2col testcases (kernel sizes 1-7)
//! through the weight-preprocess checker, requiring algorithm `name`.
//! (No comments inside the macro body: `//` would swallow the
//! line-continuation backslash.)
#define cb(name)                                                             \
    check_conv_bias_preprocess(                                              \
            get_conv_bias_args({1, 2, 3, 4, 5, 6, 7}, 2, false, false, false), \
            handle(), nullptr, 0.001, dtype::Float32(), dtype::Float32(),    \
            dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1") cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32")
#endif
#undef cb
}
  1669. // clang-format off
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE2) {
//! cb(name): run the stride-2 fp32 im2col testcases (kernel sizes 1-7)
//! with algorithm `name` enforced.
#define cb(name)                                                             \
    check_conv_bias(                                                         \
            get_conv_bias_args({1, 2, 3, 4, 5, 6, 7}, 2, false, false, false), \
            handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1")
    cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
    cb("IM2COLMATMUL:FB_F32_K8X12X1")
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32")
#endif
#undef cb
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE1_PREPROCESS) {
//! cb(name): run the stride-1 fp32 im2col testcases (kernel sizes 2-7)
//! through the weight-preprocess checker, requiring algorithm `name`.
#define cb(name)                                                             \
    check_conv_bias_preprocess(                                              \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false),  \
            handle(), nullptr, 0.001, dtype::Float32(), dtype::Float32(),    \
            dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1")
    cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32")
#endif
#undef cb
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_STRIDE1) {
//! cb(name): run the stride-1 fp32 im2col testcases (kernel sizes 2-7)
//! with algorithm `name` enforced.
#define cb(name)                                                             \
    check_conv_bias(                                                         \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false),  \
            handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1")
    cb("IM2COLMATMUL:AARCH64_F32K4X16X1")
    cb("IM2COLMATMUL:FB_F32_K8X12X1")
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32")
    cb("IM2COLMATMUL:FB_F32_K8X12X1")
#endif
#undef cb
}
  1713. //! CPUINFO ralated test
  1714. #if MEGDNN_AARCH64
  1715. #if MGB_ENABLE_CPUINFO
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_A55) {
    //! temporarily make cpuinfo report a Cortex-A55 micro-arch so the
    //! uarch-specific kernel-selection paths are exercised (restored when
    //! the guard goes out of scope)
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
//! cb(name, stride): fp32 im2col testcases (kernel sizes 2-7) at `stride`
#define cb(name,stride) \
    check_conv_bias( \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, stride, false, false, false), \
            handle(), name);
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 1)
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 2)
#undef cb
}
  1726. #endif
  1727. #endif
  1728. #if MEGDNN_AARCH64
  1729. #if MGB_ENABLE_CPUINFO
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COL_FP32_A53) {
    //! temporarily make cpuinfo report a Cortex-A53 micro-arch so the
    //! uarch-specific kernel-selection paths are exercised (restored when
    //! the guard goes out of scope)
    CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
//! cb(name, stride): fp32 im2col testcases (kernel sizes 2-7) at `stride`
#define cb(name,stride) \
    check_conv_bias( \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, stride, false, false, false), \
            handle(), name);
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 1)
    cb("IM2COLMATMUL:AARCH64_F32K8X12X1", 2)
#undef cb
}
  1740. #endif
  1741. #endif
  1742. #if MEGDNN_AARCH64
  1743. #if MGB_ENABLE_CPUINFO
  1744. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_MK4_PACK_F32_A55) {
  1745. CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
  1746. using namespace conv_bias;
  1747. std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
  1748. {2, 3, 7}, 1, false, false, false, false, false, true, true);
  1749. check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
  1750. args = get_nchw44_conv_bias_args(
  1751. {2, 3, 7}, 2, false, false, false, false, false, true, true);
  1752. check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
  1753. }
  1754. #endif
  1755. #endif
  1756. #if MEGDNN_AARCH64
  1757. #if MGB_ENABLE_CPUINFO
  1758. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_MK4_PACK_F32_A53) {
  1759. CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
  1760. using namespace conv_bias;
  1761. std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
  1762. {2, 3, 7}, 1, false, false, false, false, false, true, true);
  1763. check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
  1764. args = get_nchw44_conv_bias_args(
  1765. {2, 3, 7}, 2, false, false, false, false, false, true, true);
  1766. check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
  1767. }
  1768. #endif
  1769. #endif
  1770. #if MEGDNN_AARCH64
  1771. #if MGB_ENABLE_CPUINFO
  1772. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A55) {
  1773. CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a55);
  1774. using namespace conv_bias;
  1775. std::vector<conv_bias::TestArg> args =
  1776. get_nchw44_conv_bias_args({1}, 1, true, false, false);
  1777. check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
  1778. }
  1779. #endif
  1780. #endif
  1781. #if MEGDNN_AARCH64
  1782. #if MGB_ENABLE_CPUINFO
  1783. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_MK4_PACK_F32_A53) {
  1784. CpuInfoTmpReplace cpu_replace_guard(cpuinfo_uarch_cortex_a53);
  1785. using namespace conv_bias;
  1786. std::vector<conv_bias::TestArg> args =
  1787. get_nchw44_conv_bias_args({1}, 1, true, false, false);
  1788. check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
  1789. }
  1790. #endif
  1791. #endif
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM) {
    UniformIntRNG rng{-50, 50};
//! cb(name): symmetric-quantized int8 im2col checks with algorithm `name`:
//! stride-1 kernels 2-7 plus the stride-2 1x1 case.
//! (No comments inside the macro body: `//` would swallow the
//! line-continuation backslash.)
#define cb(name)                                                          \
    checker_conv_bias(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, \
                                         false, true, true),              \
                      handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),  \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
                      dtype::QuantizedS8(60.25f), name);                  \
    checker_conv_bias(                                                    \
            get_conv_bias_args({1}, 2, false, false, false, true, true),  \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),            \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),         \
            dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
    //! NOTE(review): tolerance is relaxed to 1 on armv7 -- presumably the
    //! int8 kernel rounds differently from the reference; confirm.
    epsilon = 1;
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif
#undef cb
}
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
//! cb(name): same cases as CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM, but routed
//! through the weight-preprocess checker.
#define cb(name)                                                          \
    check_conv_bias_preprocess(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, \
                                         false, true, true),              \
                      handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),  \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
                      dtype::QuantizedS8(60.25f), name);                  \
    check_conv_bias_preprocess(                                           \
            get_conv_bias_args({1}, 2, false, false, false, true, true),  \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),            \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),         \
            dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
    //! NOTE(review): tolerance is relaxed to 1 on armv7, matching the
    //! non-preprocess variant of this test.
    epsilon = 1;
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif
#undef cb
}
  1846. #if __ARM_FEATURE_DOTPROD
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_MK4_DOT) {
    UniformIntRNG rng{-50, 50};
//! cb(name): symmetric-quantized NCHW44 dot-product im2col checks with
//! algorithm `name`: stride-1 kernels 2-7 plus the stride-2 1x1 case.
#define cb(name)                                                             \
    checker_conv_bias(get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, \
                                                false, false, false, true),  \
                      handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),     \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),  \
                      dtype::QuantizedS8(60.25f), name);                     \
    checker_conv_bias(                                                       \
            get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true), \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),               \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),            \
            dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
  1868. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_MK4_DOT_PREPROCESS) {
  1869. UniformIntRNG rng{-50, 50};
  1870. #define cb(name) \
  1871. check_conv_bias_preprocess(get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, \
  1872. false, false, false, true), \
  1873. handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
  1874. dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
  1875. dtype::QuantizedS8(60.25f), name); \
  1876. checker_conv_bias( \
  1877. get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true), \
  1878. handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
  1879. dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
  1880. dtype::QuantizedS8(60.25f), name);
  1881. float epsilon = 0.001;
  1882. #if MEGDNN_AARCH64
  1883. cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
  1884. #elif MEGDNN_ARMV7
  1885. cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
  1886. #endif
  1887. #undef cb
  1888. }
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_MK4_DOT_S2_FUSE) {
    UniformIntRNG rng{-50, 50};
//! cb(name): stride-2 3x3 NCHW44 dot-product cases with fused nonlinearity.
//! NOTE(review): the macro's last call line ends with a line-continuation
//! backslash, which splices the following `float epsilon` declaration into
//! the macro body (using `epsilon` before its declaration in each cb()
//! expansion); verify against the original file that a terminating blank
//! line was not lost here.
#define cb(name)                                                         \
    checker_conv_bias(get_nchw44_conv_bias_args({3}, 2, false,           \
                                                false, false, false, true), \
                      handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
                      dtype::QuantizedS8(60.25f), name);                 \
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
// NCHW44-DOT im2col conv-bias with qint8 src/filter and raw qint32 output
// (empty dst dtype => no requantize stage).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_S8x8x32_MK4_DOT) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(                                                         \
            get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true,      \
                                      true, false, true, true, false, false),  \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);   \
    checker_conv_bias(                                                         \
            get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true,  \
                                      true, false, false),                     \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
// Weight-preprocess variant of the S8x8x32 NCHW44-DOT test above.
// NOTE(review): the boolean argument lists differ slightly from the
// non-preprocess twin — presumably to skip modes unsupported with
// preprocessing; confirm against get_nchw44_conv_bias_args.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_S8x8x32_MK4_DOT_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false,     \
                                      true, false, true, false, false, true),  \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);   \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true,  \
                                      false, false, true),                     \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
// Same NCHW44-DOT coverage as the S8x8x32 test but with plain (unquantized)
// Int8 src/filter and Int32 accumulation.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32_MK4_DOT) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(                                                         \
            get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true,      \
                                      true, false, true, true, false, false),  \
            handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(),             \
            dtype::Int32(), {}, name);                                         \
    checker_conv_bias(                                                         \
            get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true,  \
                                      true, false, false),                     \
            handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(),             \
            dtype::Int32(), {}, name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
// Weight-preprocess variant of the Int8x8x32 NCHW44-DOT test above.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32_MK4_DOT_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false,     \
                                      true, false, true, false, false, true),  \
            handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(),             \
            dtype::Int32(), {}, name);                                         \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({1}, 2, false, true, true, false, true,  \
                                      false, false, true),                     \
            handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(),             \
            dtype::Int32(), {}, name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_INT8_MK4_8X4X4_DOTPROD:96");
#endif
#undef cb
}
// CONV1x1 algorithm on NCHW44-DOT layout: qint8->qint8 (requantized),
// qint8->qint32 (raw accumulator), and plain int8->int32, all stride-1 1x1.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_CONV1x1_QUANTIZEDSYM_MK4_DOT) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                                \
    checker_conv_bias(                                                          \
            get_nchw44_conv_bias_args({1}, 1, true, true, false, false, true),  \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                  \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),               \
            dtype::QuantizedS8(60.25f), name);                                  \
    checker_conv_bias(                                                          \
            get_nchw44_conv_bias_args({1}, 1, true, true, true, false, true,    \
                                      false, false, true),                      \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                  \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), {}, name);    \
    checker_conv_bias(                                                          \
            get_nchw44_conv_bias_args({1}, 1, true, true, true, false, true,    \
                                      false, false, true),                      \
            handle(), &rng, epsilon, dtype::Int8(), dtype::Int8(),              \
            dtype::Int32(), {}, name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD");
#elif MEGDNN_ARMV7
    cb("CONV1x1:AARCH32_INT8_MK4_8X4X4_DOTPROD");
#endif
#undef cb
}
  2015. #endif
  2016. // clang-format on
  2017. #if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Asymmetric quint8 im2col conv-bias (NCHW). Uses a wider tolerance on
// ARMv7, where the non-dot kernel accumulates with more rounding error.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUANTIZEDASYM) {
    NormalRNG rng(128.f);
#define cb(name)                                                               \
    checker_conv_bias(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false,  \
                                         false, true, true),                   \
                      handle(), &rng, epsilon,                                 \
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125),              \
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129),              \
                      dtype::QuantizedS32(1.2 * 1.3),                          \
                      dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);      \
    checker_conv_bias(                                                         \
            get_conv_bias_args({1}, 2, false, false, false, true, true),       \
            handle(), &rng, epsilon,                                           \
            dtype::Quantized8Asymm(1.2f, (uint8_t)125),                        \
            dtype::Quantized8Asymm(1.3f, (uint8_t)129),                        \
            dtype::QuantizedS32(1.2 * 1.3),                                    \
            dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
    epsilon = 1;
    cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}
// Weight-preprocess variant of the asymmetric quint8 im2col test above.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDASYM_FILTERPREPROCESS) {
    NormalRNG rng(128.f);
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false,     \
                               true, true),                                    \
            handle(), &rng, epsilon,                                           \
            dtype::Quantized8Asymm(1.2f, (uint8_t)125),                        \
            dtype::Quantized8Asymm(1.3f, (uint8_t)129),                        \
            dtype::QuantizedS32(1.2 * 1.3),                                    \
            dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);                \
    check_conv_bias_preprocess(                                                \
            get_conv_bias_args({1}, 2, false, false, false, true, true),       \
            handle(), &rng, epsilon,                                           \
            dtype::Quantized8Asymm(1.2f, (uint8_t)125),                        \
            dtype::Quantized8Asymm(1.3f, (uint8_t)129),                        \
            dtype::QuantizedS32(1.2 * 1.3),                                    \
            dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
    epsilon = 1;
    cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}
  2080. #endif
  2081. #if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Asymmetric quint8 src/filter with raw qint32 output (no requantize).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_QUINT8x8x32) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
#define cb(name)                                                               \
    checker_conv_bias(get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false,  \
                                         true, true, false),                   \
                      handle(), &rng, epsilon,                                 \
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125),              \
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129),              \
                      dtype::QuantizedS32(1.2 * 1.3), {}, name);               \
    checker_conv_bias(                                                         \
            get_conv_bias_args({1}, 2, false, false, true, true, false),       \
            handle(), &rng, epsilon,                                           \
            dtype::Quantized8Asymm(1.2f, (uint8_t)125),                        \
            dtype::Quantized8Asymm(1.3f, (uint8_t)129),                        \
            dtype::QuantizedS32(1.2 * 1.3), {}, name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH32_QUINT8_K4X8X4");
#endif
    cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}
// Weight-preprocess variant of the QUINT8x8x32 test above.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUINT8x8x32_FILTERPREPROCESS) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true, true),      \
            handle(), &rng, epsilon,                                           \
            dtype::Quantized8Asymm(1.2f, (uint8_t)125),                        \
            dtype::Quantized8Asymm(1.3f, (uint8_t)129),                        \
            dtype::QuantizedS32(1.2 * 1.3), {}, name);                         \
    check_conv_bias_preprocess(get_conv_bias_args({1}, 2, false, true, true),  \
                               handle(), &rng, epsilon,                        \
                               dtype::Quantized8Asymm(1.2f, (uint8_t)125),     \
                               dtype::Quantized8Asymm(1.3f, (uint8_t)129),     \
                               dtype::QuantizedS32(1.2 * 1.3), {}, name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_QUINT8_K8X8X8");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH32_QUINT8_K4X8X4");
#endif
    cb("IM2COLMATMUL:ARMV7_QUINT8_K4X8X8");
#endif
#undef cb
}
// Int8 src/filter with Int16 accumulation, covering both plain NCHW
// (cb) and NCHW44 (cb_nchw44) argument sets per matmul kernel.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_IM2COLMATMUL_INT8x8x16) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args_nchw44 =
            get_nchw44_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, true, false, true,
                                      false, false, true, false, false);
    std::vector<conv_bias::TestArg> args_nchw44_1x1s2 =
            get_nchw44_conv_bias_args({1}, 2, true, false, true, false, false,
                                      true, false, false);
#define cb(name)                                                               \
    checker_conv_bias(                                                         \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, true),     \
            handle(), &rng, epsilon, dtype::Int8{}, dtype::Int8{},             \
            dtype::Int16{}, dtype::Int16{}, name);                             \
    checker_conv_bias(get_conv_bias_args({1}, 2, false, false, true),          \
                      handle(), &rng, epsilon, dtype::Int8{}, dtype::Int8{},   \
                      dtype::Int16{}, dtype::Int16{}, name);
#define cb_nchw44(name)                                                        \
    checker_conv_bias(args_nchw44, handle(), &rng, epsilon, dtype::Int8{},     \
                      dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, name);    \
    checker_conv_bias(args_nchw44_1x1s2, handle(), &rng, epsilon,              \
                      dtype::Int8{}, dtype::Int8{}, dtype::Int16{},            \
                      dtype::Int16{}, name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X16_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X16_K4X4X16");
    cb_nchw44("IM2COLMATMUL:AARCH64_INT8X8X16_MK4_4X4X8");
    cb_nchw44("IM2COLMATMUL:AARCH64_INT8X8X16_MK4_16X12X4");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X8X8");
    cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X2X16");
    cb_nchw44("IM2COLMATMUL:ARMV7_INT8X8X16_MK4_K8X8X4");
#endif
    // Generic fallback kernel is available on every ARM target.
    cb("IM2COLMATMUL:ARM_COMMON_INT8X8X16");
#undef cb
#undef cb_nchw44
}
// Weight-preprocess variant of the int8x8x16 im2col test (packed kernels).
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONVBIAS_IM2COLMATMUL_INT8x8x16_FILTERPREPROCESS) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true, true),      \
            handle(), &rng, epsilon, dtype::Int8{}, dtype::Int8{},             \
            dtype::Int16{}, dtype::Int16{}, name);                             \
    check_conv_bias_preprocess(get_conv_bias_args({1}, 2, false, true, true),  \
                               handle(), &rng, epsilon, dtype::Int8{},         \
                               dtype::Int8{}, dtype::Int16{}, dtype::Int16{},  \
                               name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X16_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X16_K4X4X16");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X8X8");
    cb("IM2COLMATMUL:ARMV7_INT8X8X16_K4X2X16");
#endif
#undef cb
}
  2201. TEST_F(ARM_COMMON_MULTI_THREADS,
  2202. CONVBIAS_IM2COLMATMUL_INT8x8x16_NOPACK_FILTERPREPROCESS) {
  2203. UniformIntRNG rng{-50, 50};
  2204. float epsilon = 0.001;
  2205. #define cb(name) \
  2206. check_conv_bias_preprocess( \
  2207. get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, true), \
  2208. handle(), &rng, epsilon, dtype::Int8{}, dtype::Int8{}, \
  2209. dtype::Int16{}, dtype::Int16{}, name); \
  2210. check_conv_bias_preprocess(get_conv_bias_args({1}, 2, false, false, true), \
  2211. handle(), &rng, epsilon, dtype::Int8{}, \
  2212. dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, \
  2213. name);
  2214. #if MEGDNN_AARCH64
  2215. cb("IM2COLMATMUL:ARM_COMMON_INT8X8X16");
  2216. #elif MEGDNN_ARMV7
  2217. cb("IM2COLMATMUL:ARM_COMMON_INT8X8X16");
  2218. #endif
  2219. #undef cb
  2220. }
  2221. #endif
  2222. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
// FP16 im2col conv-bias; stride-1 filters 2..7 merged with stride-2 1x1.
// Tolerance 0.03 reflects half-precision accumulation error.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_FP16) {
    using namespace conv_bias;
    param::ConvBias cur_param;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false);
    std::vector<conv_bias::TestArg> args1 =
            get_conv_bias_args({1}, 2, false, false, false);
    args.insert(args.begin(), args1.begin(), args1.end());
    NormalRNG rng(1);
#define cb(name)                                                               \
    checker_conv_bias(args, handle(), &rng, 0.03, dtype::Float16{},            \
                      dtype::Float16{}, dtype::Float16{}, dtype::Float16{},    \
                      name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F16_K8X24X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_F16_K4X16X1");
#endif
#undef cb
}
// Weight-preprocess variant of the FP16 im2col test above.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_FP16_FILTERPREPROCESS) {
    using namespace conv_bias;
    param::ConvBias cur_param;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, false, false);
    std::vector<conv_bias::TestArg> args1 =
            get_conv_bias_args({1}, 2, false, false, false);
    args.insert(args.begin(), args1.begin(), args1.end());
    NormalRNG rng(1);
#define cb(name)                                                               \
    check_conv_bias_preprocess(args, handle(), &rng, 0.03, dtype::Float16{},   \
                               dtype::Float16{}, dtype::Float16{},             \
                               dtype::Float16{}, name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F16_K8X24X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:AARCH32_F16_K4X16X1");
#endif
#undef cb
}
  2263. #endif
  2264. void checker_conv_bias_mul_int8x8x32(std::vector<conv_bias::TestArg> args,
  2265. Handle* handle, const char* algo_name) {
  2266. using namespace conv_bias;
  2267. Checker<ConvBias> checker(handle);
  2268. checker.set_before_exec_callback(
  2269. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  2270. checker.set_dtype(0, dtype::Int8());
  2271. checker.set_dtype(1, dtype::Int8());
  2272. checker.set_dtype(2, dtype::Int32());
  2273. checker.set_dtype(4, dtype::Int32());
  2274. for (auto&& arg : args) {
  2275. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  2276. }
  2277. UniformIntRNG rng{-50, 50};
  2278. for (auto&& arg : args) {
  2279. checker.set_dtype(0, dtype::QuantizedS8(2.5f))
  2280. .set_dtype(1, dtype::QuantizedS8(2.5f))
  2281. .set_dtype(2, dtype::QuantizedS32(6.25f))
  2282. .set_dtype(4, dtype::QuantizedS32(6.25f))
  2283. .set_rng(0, &rng)
  2284. .set_rng(1, &rng)
  2285. .set_rng(2, &rng)
  2286. .set_param(arg.param)
  2287. .execs({arg.src, arg.filter, {}, {}, {}});
  2288. }
  2289. }
  2290. void checker_conv_bias_int8x8x32_preprocess(
  2291. std::vector<conv_bias::TestArg> args, Handle* handle,
  2292. const char* algo_name) {
  2293. using namespace conv_bias;
  2294. Checker<ConvBiasForward, OprWeightPreprocessProxy<ConvBiasForward>> checker(
  2295. handle);
  2296. checker.set_before_exec_callback(
  2297. conv_bias::ConvBiasAlgoChecker<ConvBias>(algo_name));
  2298. checker.set_dtype(0, dtype::Int8());
  2299. checker.set_dtype(1, dtype::Int8());
  2300. checker.set_dtype(2, dtype::Int32());
  2301. checker.set_dtype(4, dtype::Int32());
  2302. for (auto&& arg : args) {
  2303. checker.set_param(arg.param).execs({arg.src, arg.filter, {}, {}, {}});
  2304. }
  2305. UniformIntRNG rng{-50, 50};
  2306. for (auto&& arg : args) {
  2307. checker.set_dtype(0, dtype::QuantizedS8(2.5f))
  2308. .set_dtype(1, dtype::QuantizedS8(2.5f))
  2309. .set_dtype(2, dtype::QuantizedS32(6.25f))
  2310. .set_dtype(4, dtype::QuantizedS32(6.25f))
  2311. .set_rng(0, &rng)
  2312. .set_rng(1, &rng)
  2313. .set_rng(2, &rng)
  2314. .set_param(arg.param)
  2315. .execs({arg.src, arg.filter, {}, {}, {}});
  2316. }
  2317. }
  2318. #if MEGDNN_AARCH64 || MEGDNN_ARMV7
  2319. #if !__ARM_FEATURE_DOTPROD
// Non-dot NCHW44 int8x8x32 im2col, stride 2, filters 2/5/7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S2) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({2, 5, 7}, 2, false, false, true);
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Weight-preprocess variant of the NCHW44 int8x8x32 stride-2 test.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S2_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({2, 5, 7}, 2, false, false, true);
#define cb(name) checker_conv_bias_int8x8x32_preprocess(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Non-dot NCHW44 int8x8x32 im2col, stride 1, filters 3/4/6.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S1) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({3, 4, 6}, 1, false, false, true);
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Weight-preprocess variant of the NCHW44 int8x8x32 stride-1 test.
// NOTE(review): the 4th argument is `true` here while the non-preprocess
// twin passes `false` — presumably intentional; verify against
// get_nchw44_conv_bias_args' parameter meaning.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_INT8x8x32NCHW44_S1_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({3, 4, 6}, 1, false, true, true);
#define cb(name) checker_conv_bias_int8x8x32_preprocess(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Non-dot NCHW44 symmetric qint8 im2col, stride 2, filters 3/4/6.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S2) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(get_nchw44_conv_bias_args({3, 4, 6}, 2), handle(), &rng, \
                      epsilon, dtype::QuantizedS8(2.5f),                       \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),    \
                      dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Weight-preprocess variant of the NCHW44 qint8 stride-2 test.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S2_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({3, 4, 6}, 2), handle(), &rng, epsilon,  \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),                \
            dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Non-dot NCHW44 symmetric qint8 im2col, stride 1, filters 2/5/7.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S1) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(get_nchw44_conv_bias_args({2, 5, 7}, 1), handle(), &rng, \
                      epsilon, dtype::QuantizedS8(2.5f),                       \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),    \
                      dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
// Weight-preprocess variant of the NCHW44 qint8 stride-1 test.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_S1_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({2, 5, 7}, 1), handle(), &rng, epsilon,  \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),                \
            dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#else
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_MK4_4X2X16:96");
#endif
#undef cb
}
  2434. #if MEGDNN_AARCH64
// AArch64-only fused-op check for the non-dot NCHW44 qint8 3x3 case.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_FUSE) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(get_nchw44_conv_bias_args({3}, 1), handle(), &rng,       \
                      epsilon, dtype::QuantizedS8(2.5f),                       \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),    \
                      dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#undef cb
}
// Weight-preprocess variant of the AArch64 NCHW44 fuse test above.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44_FUSE_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({3}, 1), handle(), &rng, epsilon,        \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),                \
            dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_4X4X16:96");
#undef cb
}
  2459. #endif
  2460. #endif
  2461. #endif
  2462. #if MEGDNN_AARCH64
  2463. #if __ARM_FEATURE_DOTPROD
// AArch64 dot-product NCHW44-DOT fused-op check, qint8 3x3 stride-1.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44DOT_FUSE) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    checker_conv_bias(                                                         \
            get_nchw44_conv_bias_args({3}, 1, false, false, false, false,      \
                                      true, false, false, false),              \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),              \
            dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#undef cb
}
// Weight-preprocess variant of the NCHW44-DOT fuse test above.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_QUANTIZEDSYM_NCHW44DOT_FUSE_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
#define cb(name)                                                               \
    check_conv_bias_preprocess(                                                \
            get_nchw44_conv_bias_args({3}, 1, false, false, false, false,      \
                                      true, false, false, false),              \
            handle(), &rng, epsilon, dtype::QuantizedS8(2.5f),                 \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f),              \
            dtype::QuantizedS8(60.25f), name);
    float epsilon = 0.001;
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_MK4_8X12X4_DOTPROD:96");
#undef cb
}
  2492. #endif
  2493. #endif
// Plain NCHW int8x8x32 im2col across all per-arch matmul kernels; delegates
// dtype coverage (Int8/Int32 and qint8/qint32) to checker_conv_bias_mul_int8x8x32.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COLMATMUL_INT8x8x32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true, true);
    std::vector<conv_bias::TestArg> args1 =
            get_conv_bias_args({1}, 2, false, true, true);
    args.insert(args.begin(), args1.begin(), args1.end());
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH32_INT8_K6X8X4");
#endif
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif
#if MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X2X16");
#endif
#undef cb
}
// Weight-preprocess variant of the plain NCHW int8x8x32 im2col test.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COLMATMUL_INT8X8X32_FILTER_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_args({2, 3, 4, 5, 6, 7}, 1, false, true, true);
    std::vector<conv_bias::TestArg> args1 =
            get_conv_bias_args({1}, 2, false, true, true);
    args.insert(args.begin(), args1.begin(), args1.end());
#define cb(name) checker_conv_bias_int8x8x32_preprocess(args, handle(), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X12X4_DOTPROD");
#else
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K8X8X8");
    cb("IM2COLMATMUL:AARCH64_INT8X8X32_K4X4X16");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("IM2COLMATMUL:AARCH32_INT8_K6X8X4");
#endif
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X8X8");
#endif
#if MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_INT8X8X32_K4X2X16");
#endif
#undef cb
}
// FP32 MK4 (NCHW44) packed im2col, stride 1, filters 2/4/7.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S1_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {2, 4, 7}, 1, false, false, false, false, false, true, true);
#if MEGDNN_AARCH64
    check_conv_bias(args, handle(), "IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    check_conv_bias(args, handle(), "IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
}
// Weight-preprocess variant of the FP32 MK4 packed stride-1 test.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S1_MK4_PACK_F32_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {2, 4, 7}, 1, false, false, false, false, false, true, true);
#define cb(name)                                                               \
    check_conv_bias_preprocess(args, handle(), nullptr, 0.001,                 \
                               dtype::Float32(), dtype::Float32(),             \
                               dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
#undef cb
}
// FP32 MK4 (NCHW44) packed im2col, stride 2, filters 3/5/6.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S2_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {3, 5, 6}, 2, false, false, false, false, false, true, true);
#define cb(name) check_conv_bias(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
#undef cb
}
// Weight-preprocess fused-op variant of the FP32 MK4 packed stride-2 test.
TEST_F(ARM_COMMON_MULTI_THREADS,
       CONV_BIAS_IM2COL_S2_MK4_PACK_F32_FUSE_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {3}, 2, false, false, false, false, false, true, true, false);
#define cb(name)                                                               \
    check_conv_bias_preprocess(args, handle(), nullptr, 0.001,                 \
                               dtype::Float32(), dtype::Float32(),             \
                               dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
#undef cb
}
// Fused-op variant of the FP32 MK4 packed stride-2 test (3x3 only).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_IM2COL_S2_MK4_PACK_F32_FUSE) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_nchw44_conv_bias_args(
            {3}, 2, false, false, false, false, false, true, true, false);
#define cb(name) check_conv_bias(args, handle(), name);
#if MEGDNN_AARCH64
    cb("IM2COLMATMUL:AARCH64_F32_MK4_K8X12X1");
#elif MEGDNN_ARMV7
    cb("IM2COLMATMUL:ARMV7_F32_MK4_PACK_4X12");
#endif
#undef cb
}
  2612. /***************************** Conv1x1 Algo Test ***********************/
// CONV1x1 FP32 stride-1; also checks CONV1x1_GEMV on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
#if MEGDNN_AARCH64
    check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(args, handle(), "CONV1x1:ARMV7_F32:48");
#endif
    // GEMV path only applies when the spatial size is exactly 1x1.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
}
// Weight-preprocess variant of the CONV1x1 FP32 test (no GEMV subset).
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F32_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
#define cb(name)                                                               \
    check_conv_bias_preprocess(args, handle(), nullptr, 0.001,                 \
                               dtype::Float32(), dtype::Float32(),             \
                               dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_F32K8X12X1:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_F32:48");
#endif
#undef cb
}
// CONV1x1 FP32 on MK4 (NCHW44) packed layout; plus GEMV on 1x1-spatial args.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_PACK_F32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, 1, true, false, false);
#if MEGDNN_AARCH64
    check_conv_bias(args, handle(), "CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
#elif MEGDNN_ARMV7
    check_conv_bias(args, handle(), "CONV1x1:ARMV7_F32_MK4_PACK_4X12:24");
#endif
    // GEMV path only applies when the spatial size is exactly 1x1.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
}
// Weight-preprocess variant of the MK4 (NCHW44) packed fp32 1x1 test: same
// algorithms as CONV_BIAS_1X1_S1_MK4_PACK_F32 but driven through
// check_conv_bias_preprocess with fp32 dtypes and tolerance 0.001.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_PACK_F32_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, 1, true, false, false);
// Run the named algorithm over all generated cases.
#define cb(name) \
    check_conv_bias_preprocess(args, handle(), nullptr, 0.001, \
                               dtype::Float32(), dtype::Float32(), \
                               dtype::Float32(), dtype::Float32(), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_F32_MK4_K8X12X1:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_F32_MK4_PACK_4X12:24");
#endif
#undef cb
}
  2673. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_MK4_NO_PACK_F32) {
  2674. using namespace conv_bias;
  2675. std::vector<conv_bias::TestArg> args =
  2676. get_nchw44_conv_bias_args({1}, 1, true, false, false);
  2677. std::vector<conv_bias::TestArg> args_of_4;
  2678. for (auto&& arg : args) {
  2679. if (arg.src.shape[2] * arg.src.shape[3] % 4 == 0) {
  2680. args_of_4.push_back(arg);
  2681. }
  2682. }
  2683. #if MEGDNN_AARCH64
  2684. check_conv_bias(args_of_4, handle(), "CONV1x1:AARCH64_F32_MK4_4x16:24");
  2685. #elif MEGDNN_ARMV7
  2686. check_conv_bias(args_of_4, handle(), "CONV1x1:ARMV7_F32_MK4_4x8:48");
  2687. #endif
  2688. }
  2689. #if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
  2690. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F16) {
  2691. using namespace conv_bias;
  2692. std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
  2693. NormalRNG rng(1);
  2694. #if MEGDNN_AARCH64
  2695. checker_conv_bias(args, handle(), &rng, 0.03, dtype::Float16{},
  2696. dtype::Float16{}, dtype::Float16{}, dtype::Float16{},
  2697. "CONV1x1:AARCH64_F16_K8X24X1:48");
  2698. #elif MEGDNN_ARMV7
  2699. checker_conv_bias(args, handle(), &rng, 0.03, dtype::Float16{},
  2700. dtype::Float16{}, dtype::Float16{}, dtype::Float16{},
  2701. "CONV1x1:AARCH32_F16_K4X16X1:24");
  2702. #endif
  2703. std::vector<conv_bias::TestArg> gemv_args;
  2704. for (auto&& arg : args)
  2705. if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
  2706. gemv_args.emplace_back(arg);
  2707. }
  2708. check_conv_bias(gemv_args, handle(), "CONV1x1_GEMV");
  2709. }
  2710. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_F16_PREPROCESS) {
  2711. using namespace conv_bias;
  2712. std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(false, false);
  2713. NormalRNG rng(1);
  2714. #if MEGDNN_AARCH64
  2715. check_conv_bias_preprocess(args, handle(), &rng, 0.03, dtype::Float16{},
  2716. dtype::Float16{}, dtype::Float16{},
  2717. dtype::Float16{},
  2718. "CONV1x1:AARCH64_F16_K8X24X1:48");
  2719. #elif MEGDNN_ARMV7
  2720. check_conv_bias_preprocess(args, handle(), &rng, 0.03, dtype::Float16{},
  2721. dtype::Float16{}, dtype::Float16{},
  2722. dtype::Float16{},
  2723. "CONV1x1:AARCH32_F16_K4X16X1:24");
  2724. #endif
  2725. }
  2726. #endif
// Symmetric-quantized 1x1 conv_bias: QuantizedS8 src/filter, QuantizedS32
// bias, QuantizedS8 dst. The cb macro expands to a checker call and captures
// `args`, `rng` and `epsilon` by name, so its expansion point matters.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDSYM) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, false, true, true);
#define cb(name) \
    checker_conv_bias(args, handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
                      dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:48");
#endif
#elif MEGDNN_ARMV7
    // Looser tolerance on armv7 — rationale not visible here; presumably the
    // armv7 kernels round differently. TODO confirm.
    epsilon = 1;
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:48");
#endif
#undef cb
    // Re-check the 1x1-spatial subset with the GEMV algorithm.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias(gemv_args, handle(), &rng, epsilon,
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
                      dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f),
                      "CONV1x1_GEMV");
}
// Weight-preprocess variant of the symmetric-quantized 1x1 test: same dtypes
// and algorithm set, driven through check_conv_bias_preprocess. No GEMV pass.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDSYM_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, false, true, true);
// cb captures args/rng/epsilon by name; expanded after they are set.
#define cb(name) \
    check_conv_bias_preprocess( \
            args, handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
            dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
            dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:48");
#endif
#elif MEGDNN_ARMV7
    // Looser tolerance on armv7, mirroring the non-preprocess test above.
    epsilon = 1;
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:48");
#endif
#undef cb
}
  2781. #if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Asymmetric-quantized (Quantized8Asymm with distinct zero points) 1x1
// conv_bias. Note: `epsilon` is declared *after* the #define but before any
// cb(...) expansion — valid, since macros expand at the point of use.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDASYM) {
    UniformIntRNG rng{-50, 50};
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, false, true, true);
#define cb(name) \
    checker_conv_bias(args, handle(), &rng, epsilon, \
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
                      dtype::QuantizedS32(1.2 * 1.3), \
                      dtype::Quantized8Asymm(50.3f, (uint8_t)120), name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:48");
#else
    cb("CONV1x1:AARCH64_QUINT8_K8X8X8:24");
#endif
#elif MEGDNN_ARMV7
    // Looser tolerance on armv7, as in the other quantized tests here.
    epsilon = 1;
    cb("CONV1x1:ARMV7_QUINT8_K4X8X8:48");
#endif
#undef cb
    // Re-check the 1x1-spatial subset with the GEMV algorithm.
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias(gemv_args, handle(), &rng, epsilon,
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125),
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129),
                      dtype::QuantizedS32(1.2 * 1.3),
                      dtype::Quantized8Asymm(50.3f, (uint8_t)120),
                      "CONV1x1_GEMV");
}
// Weight-preprocess variant of the asymmetric-quantized 1x1 test. As above,
// `epsilon` is declared after the #define but before every expansion.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUANTIZEDASYM_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, false, true, true);
#define cb(name) \
    check_conv_bias_preprocess(args, handle(), &rng, epsilon, \
                               dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
                               dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
                               dtype::QuantizedS32(1.2 * 1.3), \
                               dtype::Quantized8Asymm(50.3f, (uint8_t)120), \
                               name);
    float epsilon = 0.001;
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:48");
#else
    cb("CONV1x1:AARCH64_QUINT8_K8X8X8:24");
#endif
#elif MEGDNN_ARMV7
    // Looser tolerance on armv7, matching the non-preprocess variant.
    epsilon = 1;
    cb("CONV1x1:ARMV7_QUINT8_K4X8X8:48");
#endif
#undef cb
}
  2840. #endif
  2841. #if MEGDNN_AARCH64 || MEGDNN_ARMV7
// Quint8 in, raw QuantizedS32 out (dst dtype `{}` leaves output untruncated):
// exercises the quint8 matmul kernels, then GEMV on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUINT8x8x32) {
    NormalRNG rng(128.f);
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
// cb captures args/rng/epsilon by name.
#define cb(name) \
    checker_conv_bias(args, handle(), &rng, epsilon, \
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
                      dtype::QuantizedS32(1.2 * 1.3), {}, name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_QUINT8_K8X8X8:48");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_QUINT8_K4X8X4:48");
#endif
    // Plain armv7 kernel runs whether or not dotprod is available.
    cb("CONV1x1:ARMV7_QUINT8_K4X8X8:24");
#endif
#undef cb
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias(gemv_args, handle(), &rng, epsilon,
                      dtype::Quantized8Asymm(1.2f, (uint8_t)125),
                      dtype::Quantized8Asymm(1.3f, (uint8_t)129),
                      dtype::QuantizedS32(1.2 * 1.3), {}, "CONV1x1_GEMV");
}
// Weight-preprocess variant of the quint8 -> s32 1x1 test: same dtypes and
// algorithm set, driven through check_conv_bias_preprocess. No GEMV pass.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_QUINT8x8x32_PREPROCESS) {
    NormalRNG rng(128.f);
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
#define cb(name) \
    check_conv_bias_preprocess(args, handle(), &rng, epsilon, \
                               dtype::Quantized8Asymm(1.2f, (uint8_t)125), \
                               dtype::Quantized8Asymm(1.3f, (uint8_t)129), \
                               dtype::QuantizedS32(1.2 * 1.3), {}, name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_QUINT8_K8X8X4_DOTPROD:24");
#else
    cb("CONV1x1:AARCH64_QUINT8_K8X8X8:48");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_QUINT8_K4X8X4:48");
#endif
    // Plain armv7 kernel runs whether or not dotprod is available.
    cb("CONV1x1:ARMV7_QUINT8_K4X8X8:24");
#endif
#undef cb
}
// int8 x int8 -> int16 1x1 conv_bias, covering both the plain NCHW kernels
// (cb over `args`) and the NCHW44 kernels (cb_nchw44 over `args_nchw44`),
// followed by a GEMV pass on the 1x1-spatial NCHW subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_1X1_S1_INT8x8x16) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, true, false, false);
    std::vector<conv_bias::TestArg> args_nchw44 = get_nchw44_conv_bias_args(
            {1}, 1, true, true, true, false, false, true, false, false);
// Plain-layout checker over `args`.
#define cb(name) \
    checker_conv_bias(args, handle(), &rng, epsilon, dtype::Int8{}, \
                      dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, name);
// NCHW44-layout checker over `args_nchw44`.
#define cb_nchw44(name) \
    checker_conv_bias(args_nchw44, handle(), &rng, epsilon, dtype::Int8{}, \
                      dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X16_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X16_K4X4X16:24");
    cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_4X4X8:48");
    cb_nchw44("CONV1x1:AARCH64_INT8X8X16_MK4_16X12X4:48");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X16_K4X8X8:24");
    cb("CONV1x1:ARMV7_INT8X8X16_K4X2X16:48");
    cb_nchw44("CONV1x1:ARMV7_INT8X8X16_MK4_K8X8X4:48");
#endif
    // Architecture-independent fallback kernel, run on both arches.
    cb("CONV1x1:ARM_COMMON_INT8X8X16:48");
#undef cb
#undef cb_nchw44
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias(gemv_args, handle(), &rng, epsilon, dtype::Int8{},
                      dtype::Int8{}, dtype::Int16{}, dtype::Int16{},
                      "CONV1x1_GEMV");
}
// Weight-preprocess variant of the int8x8x16 1x1 test. Note the arg set
// differs from the non-preprocess test ((true, true) here vs
// (false, true, false, false) there) — presumably intentional; confirm
// against get_conv_bias_1x1_args.
TEST_F(ARM_COMMON_MULTI_THREADS, CONVBIAS_1X1_S1_INT8x8x16_PREPROCESS) {
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
#define cb(name) \
    check_conv_bias_preprocess(args, handle(), &rng, epsilon, dtype::Int8{}, \
                               dtype::Int8{}, dtype::Int16{}, dtype::Int16{}, \
                               name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X16_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X16_K4X4X16:24");
    cb("CONV1x1:ARM_COMMON_INT8X8X16:24"); //! add nopack test
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X16_K4X8X8:24");
    cb("CONV1x1:ARMV7_INT8X8X16_K4X2X16:48");
    cb("CONV1x1:ARM_COMMON_INT8X8X16:24"); //! add nopack test
#endif
#undef cb
}
  2951. #endif
// int8 x int8 -> int32 1x1 conv_bias via checker_conv_bias_mul_int8x8x32,
// then a GEMV pass on the 1x1-spatial subset.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_conv_bias_1x1_args(false, true, false, false);
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:48");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:24");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_INT8_K6X8X4:48");
#endif
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:24");
#endif
// Extra armv7-only kernel, kept in its own guard.
#if MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_K4X2X16:48");
#endif
#undef cb
    std::vector<conv_bias::TestArg> gemv_args;
    for (auto&& arg : args)
        if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
            gemv_args.emplace_back(arg);
        }
    checker_conv_bias_mul_int8x8x32(gemv_args, handle(), "CONV1x1_GEMV");
}
// Weight-preprocess variant of the int8x8x32 1x1 test, via
// checker_conv_bias_int8x8x32_preprocess. Arg flags differ from the
// non-preprocess variant ((true, true) vs (false, true, false, false)) —
// presumably intentional; confirm against get_conv_bias_1x1_args.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args = get_conv_bias_1x1_args(true, true);
#define cb(name) checker_conv_bias_int8x8x32_preprocess(args, handle(), name);
#if MEGDNN_AARCH64
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH64_INT8X8X32_K8X12X4_DOTPROD:48");
#else
    cb("CONV1x1:AARCH64_INT8X8X32_K8X8X8:24");
    cb("CONV1x1:AARCH64_INT8X8X32_K4X4X16:24");
#endif
#elif MEGDNN_ARMV7
#if __ARM_FEATURE_DOTPROD
    cb("CONV1x1:AARCH32_INT8_K6X8X4:48");
#endif
    cb("CONV1x1:ARMV7_INT8X8X32_K4X8X8:24");
#endif
// Extra armv7-only kernel, kept in its own guard.
#if MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_K4X2X16:48");
#endif
#undef cb
}
  3003. #ifndef __ARM_FEATURE_DOTPROD
// MK4-layout int8x8x32 1x1 conv_bias (only without dotprod — see the
// enclosing #ifndef). Two phases: first the raw int8x8x32 multiply checker,
// then the quantized-s8 checker over a differently-flagged arg set. The cb
// macro is defined and #undef'd once per phase.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_MK4) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, 1, true, true, true);
#define cb(name) checker_conv_bias_mul_int8x8x32(args, handle(), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
// Second phase: quantized-s8 end-to-end check on a fresh arg set.
#define cb(name) \
    checker_conv_bias(get_nchw44_conv_bias_args({1}, 1, true, false, false), \
                      handle(), &rng, epsilon, dtype::QuantizedS8(2.5f), \
                      dtype::QuantizedS8(2.5f), dtype::QuantizedS32(6.25f), \
                      dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
}
// Weight-preprocess variant of the MK4 int8x8x32 test: same two-phase
// structure (raw int8x8x32 multiply, then quantized-s8 end-to-end), with cb
// redefined between phases.
TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_MK4_PREPROCESS) {
    using namespace conv_bias;
    std::vector<conv_bias::TestArg> args =
            get_nchw44_conv_bias_args({1}, 1, true, true, true);
#define cb(name) checker_conv_bias_int8x8x32_preprocess(args, handle(), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
    UniformIntRNG rng{-50, 50};
    float epsilon = 0.001;
// Second phase: quantized-s8 preprocess check on a fresh arg set.
#define cb(name) \
    check_conv_bias_preprocess( \
            get_nchw44_conv_bias_args({1}, 1, true, false, false), handle(), \
            &rng, epsilon, dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f), \
            dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f), name);
#if MEGDNN_AARCH64
    cb("CONV1x1:AARCH64_INT8X8X32_MK4_4X4X16:24");
#elif MEGDNN_ARMV7
    cb("CONV1x1:ARMV7_INT8X8X32_MK4_4X2X16:24");
#endif
#undef cb
}
  3054. #endif
  3055. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44) {
  3056. using namespace conv_bias;
  3057. std::vector<conv_bias::TestArg> args =
  3058. get_nchw44_conv_bias_args({1}, 1, true, false, false);
  3059. UniformIntRNG rng{-50, 50};
  3060. float epsilon = 0.001;
  3061. std::vector<conv_bias::TestArg> gemv_args;
  3062. for (auto&& arg : args)
  3063. if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
  3064. gemv_args.emplace_back(arg);
  3065. }
  3066. checker_conv_bias(gemv_args, handle(), &rng, epsilon,
  3067. dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
  3068. dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f),
  3069. "CONV1x1_GEMV");
  3070. }
  3071. #ifdef __ARM_FEATURE_DOTPROD
  3072. TEST_F(ARM_COMMON_MULTI_THREADS, CONV_BIAS_1X1_S1_INT8x8x32_NCHW44_DOT) {
  3073. using namespace conv_bias;
  3074. std::vector<conv_bias::TestArg> args =
  3075. get_nchw44_conv_bias_args({1}, 1, true, false, false, false, true);
  3076. UniformIntRNG rng{-50, 50};
  3077. float epsilon = 0.001;
  3078. std::vector<conv_bias::TestArg> gemv_args;
  3079. for (auto&& arg : args)
  3080. if (arg.src.shape[2] == 1 && arg.src.shape[3] == 1) {
  3081. gemv_args.emplace_back(arg);
  3082. }
  3083. checker_conv_bias(gemv_args, handle(), &rng, epsilon,
  3084. dtype::QuantizedS8(2.5f), dtype::QuantizedS8(2.5f),
  3085. dtype::QuantizedS32(6.25f), dtype::QuantizedS8(60.25f),
  3086. "CONV1x1_GEMV");
  3087. }
  3088. #endif
  3089. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台