
graph_manager.cc 148 kB

  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/manager/graph_manager.h"
  17. #include <pthread.h>
  18. #include <algorithm>
  19. #include <future>
  20. #include <set>
  21. #include <sstream>
  22. #include <string>
  23. #include <thread>
  24. #include "common/math/math_util.h"
  25. #include "common/thread_pool.h"
  26. #include "common/dump/dump_manager.h"
  27. #include "analyzer/analyzer.h"
  28. #include "graph/common/ge_call_wrapper.h"
  29. #include "graph/common/local_context.h"
  30. #include "graph/common/transop_util.h"
  31. #include "graph/ge_context.h"
  32. #include "graph/ge_global_options.h"
  33. #include "graph/manager/util/rt_context_util.h"
  34. #include "graph/partition/dynamic_shape_partition.h"
  35. #include "graph/passes/enter_pass.h"
  36. #include "graph/partition/stage_partition.h"
  37. #include "graph/passes/addn_pass.h"
  38. #include "graph/passes/bitcast_pass.h"
  39. #include "graph/passes/assign_remove_pass.h"
  40. #include "graph/passes/inplace_support_check_pass.h"
  41. #include "graph/passes/atomic_addr_clean_pass.h"
  42. #include "graph/passes/attach_stream_label_pass.h"
  43. #include "graph/passes/cast_remove_pass.h"
  44. #include "graph/passes/common_subexpression_elimination_pass.h"
  45. #include "graph/passes/compile_nodes_pass.h"
  46. #include "graph/passes/cond_remove_pass.h"
  47. #include "graph/passes/constant_folding_pass.h"
  48. #include "graph/passes/constant_fuse_same_pass.h"
  49. #include "graph/passes/control_trigger_pass.h"
  50. #include "graph/passes/ctrl_edge_transfer_pass.h"
  51. #include "graph/passes/dimension_adjust_pass.h"
  52. #include "graph/passes/dimension_compute_pass.h"
  53. #include "graph/passes/flow_ctrl_pass.h"
  54. #include "graph/passes/fuse_data_nodes_with_common_input_pass.h"
  55. #include "graph/passes/identity_pass.h"
  56. #include "graph/passes/input_output_connection_identify_pass.h"
  57. #include "graph/passes/iterator_op_pass.h"
  58. #include "graph/passes/link_gen_mask_nodes_pass.h"
  59. #include "graph/passes/mark_graph_unknown_status_pass.h"
  60. #include "graph/passes/merge_pass.h"
  61. #include "graph/passes/merge_input_memcpy_pass.h"
  62. #include "graph/passes/merge_to_stream_merge_pass.h"
  63. #include "graph/passes/multi_batch_pass.h"
  64. #include "graph/passes/next_iteration_pass.h"
  65. #include "graph/passes/permute_pass.h"
  66. #include "graph/passes/prune_pass.h"
  67. #include "graph/passes/ref_identity_delete_op_pass.h"
  68. #include "graph/passes/remove_same_const_pass.h"
  69. #include "graph/passes/reshape_recovery_pass.h"
  70. #include "graph/passes/reshape_remove_pass.h"
  71. #include "graph/passes/same_transdata_breadth_fusion_pass.h"
  72. #include "graph/passes/subgraph_pass.h"
  73. #include "graph/passes/switch_data_edges_bypass.h"
  74. #include "graph/passes/switch_dead_branch_elimination.h"
  75. #include "graph/passes/switch_logic_remove_pass.h"
  76. #include "graph/passes/switch_to_stream_switch_pass.h"
  77. #include "graph/passes/transop_breadth_fusion_pass.h"
  78. #include "graph/passes/transop_nearby_allreduce_fusion_pass.h"
  79. #include "graph/passes/transop_symmetry_elimination_pass.h"
  80. #include "graph/passes/transop_without_reshape_fusion_pass.h"
  81. #include "graph/passes/transpose_transdata_pass.h"
  82. #include "graph/passes/useless_control_out_remove_pass.h"
  83. #include "graph/passes/variable_op_pass.h"
  84. #include "graph/passes/variable_ref_delete_op_pass.h"
  85. #include "graph/passes/variable_ref_useless_control_out_delete_pass.h"
  86. #include "graph/passes/end_of_sequence_add_control_pass.h"
  87. #include "graph/passes/subexpression_migration_pass.h"
  88. #include "graph/passes/subgraph_const_migration_pass.h"
  89. #include "graph/passes/unused_args_clean_pass.h"
  90. #include "graph/passes/global_step_insert_pass.h"
  91. #include "graph/passes/memcpy_addr_async_pass.h"
  92. #include "graph/passes/hccl_continuous_memcpy_pass.h"
  93. #include "graph/build/label_allocator.h"
  94. #include "graph/utils/tensor_adapter.h"
  95. #include "inc/pass_manager.h"
  96. #include "init/gelib.h"
  97. #include "ir_build/atc_ir_common.h"
  98. #include "graph/common/local_context.h"
  99. #include "graph/common/omg_util.h"
  100. #include "common/formats/utils/formats_trans_utils.h"
  101. #include "register/custom_pass_helper.h"
  102. namespace {
  103. const char *const kSummary = "Summary";
  104. const char *const kSave = "Save";
  105. const char *const kNetOutput = "NetOutput";
  106. const char *const kVariable = "Variable";
  107. const char *const kSend = "Send";
  108. const char *const kRecv = "Recv";
  109. const char *const kCheckPointForGetVar = "CheckPointGraphForGetVar";
  110. const char *const kCheckPointGraph = "checkpoint_graph";
  111. const char *const kVectorEngine = "VectorEngine";
  112. const char *const kAIcoreEngine = "AIcoreEngine";
  113. const int32_t kDynamicDimsTypeIsGetNext = 0;
  114. const int32_t kDynamicDimsTypeIsData = 1;
  115. const char *const kGetNextName = "IteratorV2";
  116. const uint32_t kInitGraphCount = 1;
  117. const uint32_t kNotAdded = 0;
  118. const uint32_t kStartAdd = 1;
  119. const uint32_t kDoneAdded = 2;
  120. bool IsTailingOptimization() {
  121. string is_tailing_optimization_option;
  122. auto ret = ge::GetContext().GetOption(ge::OPTION_EXEC_ENABLE_TAILING_OPTIMIZATION, is_tailing_optimization_option);
  123. if (ret == ge::GRAPH_SUCCESS) {
  124. GELOGI("Option ge.exec.isTailingOptimization is %s", is_tailing_optimization_option.c_str());
  125. // "1" means the option was enabled by the frontend
  126. return is_tailing_optimization_option == "1";
  127. }
  128. GELOGW("OPTION_EXEC_ENABLE_TAILING_OPTIMIZATION not set, use BFSTopologicalSorting by default.");
  129. return false;
  130. }
  131. ge::Status CheckFpCeilingMode() {
  132. static const std::set<std::string> kValidFpCeilingMode = {"0", "1", "2"};
  133. string mode;
  134. auto ret = ge::GetContext().GetOption("ge.fpCeilingMode", mode);
  135. if (ret == ge::GRAPH_SUCCESS) {
  136. if (kValidFpCeilingMode.count(mode) == 0) {
  137. GELOGE(ge::GE_GRAPH_OPTIONS_INVALID, "The fp_ceiling_mode %s is invalid, options are 0, 1, and 2.", mode.c_str());
  138. return ge::GE_GRAPH_OPTIONS_INVALID;
  139. }
  140. GELOGI("The parameter fp_ceiling_mode is set to %s.", mode.c_str());
  141. return ge::SUCCESS;
  142. }
  143. GELOGW("The parameter fp_ceiling_mode is not set");
  144. return ge::SUCCESS;
  145. }
  146. } // namespace
  147. namespace ge {
  148. GraphManager::GraphManager()
  149. : thread_run_flag_(false),
  150. graph_run_listener_(nullptr),
  151. init_flag_(false) {
  152. }
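  // Initialize the graph manager: create the graph-run listener and graph context, parse and
  // validate options, reset the internal maps, then start the PreRun and Run worker threads.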
  153. Status GraphManager::Initialize(const std::map<string, string> &options) {
  154. ErrorManager::GetInstance().SetStage(ErrorMessage::kInitialize, ErrorMessage::kOther);
  155. if (init_flag_) {
  156. GELOGW("[Initialize] GraphManager already initialized.");
  157. return SUCCESS;
  158. }
  159. // create the graph run listener
  160. graph_run_listener_ = MakeShared<GraphModelListener>(sync_run_mutex_, condition_);
  161. if (graph_run_listener_ == nullptr) {
  162. GELOGE(MEMALLOC_FAILED, "Make shared failed");
  163. return MEMALLOC_FAILED;
  164. }
  165. // graph context
  166. graph_context_ = MakeShared<GraphContext>();
  167. if (graph_context_ == nullptr) {
  168. GELOGE(MEMALLOC_FAILED, "Make shared failed.");
  169. return MEMALLOC_FAILED;
  170. }
  171. // parse option parameters
  172. Status ret = ParseOptions(options);
  173. if (ret != SUCCESS) {
  174. GELOGE(ret, "[Initialize] parse options failed.");
  175. return ret;
  176. }
  177. ret = CheckFpCeilingMode();
  178. if (ret != SUCCESS) {
  179. GELOGE(ret, "[Initialize] Check fp-ceiling-mode options failed.");
  180. return ret;
  181. }
  182. ret = graph_context_->Initialize(options);
  183. if (ret != SUCCESS) {
  184. GELOGE(ret, "[Initialize] GraphContext initialize failed.");
  185. return ret;
  186. }
  187. graph_map_.clear();
  188. cache_helper_map_.clear();
  189. graph_id_to_add_graph_cond_.clear();
  190. graph_count_.clear();
  191. init_flag_ = true;
  192. thread_run_flag_ = true;
  193. prerun_thread_ = std::thread(GraphManager::PreRunThread, this);
  194. run_thread_ = std::thread(GraphManager::RunThread, this);
  195. return SUCCESS;
  196. }
  197. Status GraphManager::UnloadModel(GeRootModelPtr ge_root_model, uint32_t graph_id) {
  198. Status ret = SUCCESS;
  199. for (size_t i = 0; i < ge_root_model->GetAllModelId().size(); ++i) {
  200. uint32_t model_id = ge_root_model->GetAllModelId()[i];
  201. GELOGI("Unload model %u.", model_id);
  202. ret = GraphLoader::UnloadModel(model_id);
  203. if (ret != SUCCESS) {
  204. GELOGW("[GraphManager] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
  205. return ret;
  206. }
  207. }
  208. return ret;
  209. }
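  // Finalize: stop and join the worker threads, unload the models of all loaded graphs
  // (graphs that are still running are skipped and reported), clear analyzer data and cached
  // state, then finalize the graph context.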
  210. Status GraphManager::Finalize() {
  211. if (!init_flag_) {
  212. GELOGW("GraphManager has not been initialized.");
  213. return SUCCESS;
  214. }
  215. if (graph_executor_.FreeExecuteMemory() != SUCCESS) {
  216. GELOGW("Graph executor FreeExecuteMemory failed, resources may not be released correctly.");
  217. }
  218. StopQueue(this);
  219. if (prerun_thread_.joinable()) {
  220. prerun_thread_.join();
  221. }
  222. if (run_thread_.joinable()) {
  223. run_thread_.join();
  224. }
  225. // check whether each graph is still running
  226. Status unload_model_ret = SUCCESS;
  227. Status ret;
  228. rtError_t rt_ret;
  229. for (auto iter = graph_map_.begin(); iter != graph_map_.end(); ++iter) {
  230. GraphNodePtr graph_node = iter->second;
  231. if (graph_node->GetRunFlag()) {
  232. GELOGW("[GraphManager] finalize failed, graphId=%u.", iter->first);
  233. unload_model_ret = GE_GRAPH_GRAPH_IS_RUNNING;
  234. continue;
  235. }
  236. // unload model
  237. auto ge_root_model = graph_node->GetGeRootModel();
  238. if (ge_root_model != nullptr && ge_root_model->GetModelId() != INVALID_MODEL_ID && graph_node->GetLoadFlag()) {
  239. rt_ret = rtSetDevice(GetContext().DeviceId());
  240. if (rt_ret != RT_ERROR_NONE) {
  241. GELOGW("[GraphManager] rtSetDevice failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(), iter->first);
  242. unload_model_ret = FAILED;
  243. continue;
  244. }
  245. ret = UnloadModel(ge_root_model, iter->first);
  246. if (ret != SUCCESS) {
  247. GELOGW("[GraphManager] unload model failed, graph_id=%u.", iter->first);
  248. unload_model_ret = ret;
  249. }
  250. rt_ret = rtDeviceReset(GetContext().DeviceId());
  251. if (rt_ret != RT_ERROR_NONE) {
  252. GELOGW("[GraphManager] rtDeviceReset failed, graphId=%u.", iter->first);
  253. unload_model_ret = FAILED;
  254. continue;
  255. }
  256. }
  257. // clear analyzer saved info(graph level)
  258. auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph());
  259. GE_CHECK_NOTNULL(compute_graph);
  260. auto session_id = compute_graph->GetSessionID();
  261. auto graph_id = compute_graph->GetGraphID();
  262. Analyzer::GetInstance()->DestroyGraphJsonObject(session_id, graph_id);
  263. }
  264. graph_map_.clear();
  265. cache_helper_map_.clear();
  266. graph_count_.clear();
  267. // graph context
  268. if (graph_context_ != nullptr) {
  269. Status ret_final = graph_context_->Finalize();
  270. if (ret_final != SUCCESS) {
  271. GELOGE(ret_final, "[GraphManager] graph context Finalize failed!");
  272. unload_model_ret = ret_final;
  273. }
  274. }
  275. init_flag_ = false;
  276. return unload_model_ret;
  277. }
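  // Decide whether multi-batch processing is needed (the graph contains Data or GetNext nodes)
  // and parse the input_shape / dynamic_dims / dynamic_node_type options into the local OMG context.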
  278. Status GraphManager::InitDynamicParams(ComputeGraphPtr &compute_graph) {
  279. for (const auto &node : compute_graph->GetAllNodes()) {
  280. auto op_desc = node->GetOpDesc();
  281. if (op_desc == nullptr) {
  282. continue;
  283. }
  284. GetLocalOmgContext().need_multi_batch = false;
  285. std::string op_type;
  286. auto ret = GetOriginalType(node, op_type);
  287. if (ret != SUCCESS) {
  288. GELOGE(FAILED, "Failed to get node %s original type.", node->GetName().c_str());
  289. return FAILED;
  290. }
  291. if ((op_desc->GetType() == DATA) || (op_type == kGetNextName)) {
  292. GELOGI("Need to process multi batch for compute graph. op_type:%s.", op_desc->GetType().c_str());
  293. GetLocalOmgContext().need_multi_batch = true;
  294. break;
  295. }
  296. }
  297. if (!options_.input_shape.empty() && !options_.dynamic_dims.empty()) {
  298. if (!ge::ParseInputShape(options_.input_shape, GetLocalOmgContext().input_dims,
  299. GetLocalOmgContext().user_input_dims, true)) {
  300. GELOGE(GRAPH_PARAM_INVALID, "Failed to parse input shape: %s.", options_.input_shape.c_str());
  301. return GRAPH_PARAM_INVALID;
  302. }
  303. GetLocalOmgContext().dynamic_dims = options_.dynamic_dims;
  304. }
  305. if (options_.dynamic_node_type == kDynamicDimsTypeIsGetNext) {
  306. GetLocalOmgContext().dynamic_node_type = GETNEXT;
  307. }
  308. if (options_.dynamic_node_type == kDynamicDimsTypeIsData) {
  309. GetLocalOmgContext().dynamic_node_type = DATA;
  310. }
  311. return SUCCESS;
  312. }
  313. void GraphManager::SetAddGraphCondition(GraphId graph_id, uint32_t cond) {
  314. std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
  315. graph_id_to_add_graph_cond_[graph_id] = cond;
  316. GELOGD("Set add_graph_cond of graph [id:%u] to %u.", graph_id, cond);
  317. }
  318. uint32_t GraphManager::GetAddGraphCondition(GraphId graph_id) {
  319. std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
  320. auto it = graph_id_to_add_graph_cond_.find(graph_id);
  321. if (it != graph_id_to_add_graph_cond_.end()) {
  322. return it->second;
  323. } else {
  324. GELOGD("Graph [id:%u] has not been added.", graph_id);
  325. return kNotAdded;
  326. }
  327. }
  328. void GraphManager::RemoveAddGraphCondition(GraphId graph_id) {
  329. std::lock_guard<std::mutex> lock(add_graph_cond_mutex_);
  330. auto it = graph_id_to_add_graph_cond_.find(graph_id);
  331. if (it != graph_id_to_add_graph_cond_.end()) {
  332. graph_id_to_add_graph_cond_.erase(it);
  333. GELOGD("Successfully removed add_graph_cond of graph [id:%u].", graph_id);
  334. } else {
  335. GELOGD("Graph [id:%u] has not been added, no need to remove.", graph_id);
  336. }
  337. }
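  // If another thread is already in the middle of adding a graph with the same id, wait on
  // add_graph_cv_ until it finishes; is_added then reports that the graph node already exists.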
  338. Status GraphManager::CheckRepeatAdd(uint32_t graph_id, bool &is_added) {
  339. uint32_t count = 0;
  340. if (GetGraphCount(graph_id, count) != SUCCESS) {
  341. GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed, graph might not have been added.", graph_id);
  342. return INTERNAL_ERROR;
  343. }
  344. // a previous thread owning the same graph_id is still in the middle of the AddGraph process
  345. if (count > 1 && GetAddGraphCondition(graph_id) == kStartAdd) {
  346. std::unique_lock<std::mutex> lock(add_graph_mutex_);
  347. GELOGD("Waiting for the previous thread to finish adding the graph.");
  348. while (GetAddGraphCondition(graph_id) != kDoneAdded) {
  349. add_graph_cv_.wait(lock);
  350. }
  351. GraphNodePtr graph_node;
  352. Status ret = GetGraphNode(graph_id, graph_node);
  353. if (ret != SUCCESS) {
  354. GELOGE(ret, "[AddGraph] GetGraphNode failed, graph_id = %u.", graph_id);
  355. return ret;
  356. }
  357. is_added = true;
  358. }
  359. return SUCCESS;
  360. }
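  // Ensure the compute graph and all of its subgraphs carry the session_graph_id attribute,
  // defaulting it to "-1_<graph_id>" when it is missing or empty.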
  361. void GraphManager::SetSessionGraphId(ComputeGraphPtr compute_graph, uint32_t graph_id) {
  362. std::string session_graph_id;
  363. if (!AttrUtils::GetStr(*compute_graph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id) || session_graph_id.empty()) {
  364. session_graph_id = "-1_" + to_string(graph_id);
  365. if (!AttrUtils::SetStr(*compute_graph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id)) {
  366. GELOGW("Set attribute of compute graph failed.");
  367. }
  368. for (auto &subgraph : compute_graph->GetAllSubgraphs()) {
  369. (void)AttrUtils::SetStr(*subgraph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id);
  370. }
  371. GELOGD("Get graph session_graph_id attr failed, set it to default value: [%s].", session_graph_id.c_str());
  372. }
  373. }
  374. Status GraphManager::NotifyWaittingGraph(uint32_t graph_id) {
  375. uint32_t count = 0;
  376. if (GetGraphCount(graph_id, count) != SUCCESS) {
  377. GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed, graph might not have been added.", graph_id);
  378. return INTERNAL_ERROR;
  379. }
  380. GELOGD("Add graph finished, graph_id:%u", graph_id);
  381. if (count > 1) {
  382. GELOGD("Finish addgraph, graph_id:%u, graph_count:%u, start to notify.", graph_id, count);
  383. add_graph_cv_.notify_all();
  384. }
  385. return SUCCESS;
  386. }
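  // Wrap the input graph in a GraphNode, record the tuning-related options
  // (build mode, build step, tuning path) and register the node in the graph map.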
  387. Status GraphManager::CreateGraphNode(uint32_t graph_id, const Graph &graph,
  388. const std::map<std::string, std::string> &options) {
  389. GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
  390. GE_IF_BOOL_EXEC(graph_node == nullptr, GELOGE(FAILED, "GraphNode make shared failed");
  391. return FAILED);
  392. std::shared_ptr<Graph> graph_ptr = MakeShared<ge::Graph>(graph);
  393. GE_IF_BOOL_EXEC(graph_ptr == nullptr, GELOGE(FAILED, "GraphPtr make shared failed");
  394. return FAILED);
  395. // update options related to graph tuning
  396. ParseOption(options, BUILD_MODE, options_.build_mode);
  397. ParseOption(options, BUILD_STEP, options_.build_step);
  398. ParseOption(options, TUNING_PATH, options_.tuning_path);
  399. graph_node->SetGraph(graph_ptr);
  400. graph_node->SetOptions(options);
  401. graph_node->IncreaseLoadCount();
  402. AddGraphNode(graph_id, graph_node);
  403. return SUCCESS;
  404. }
  405. Status GraphManager::SetStagesOptions(uint32_t graph_id, const GraphManagerOptions &options) {
  406. CompilerStages &stages = GetCompilerStages(graph_id);
  407. stages.preparer.SetOptions(options_);
  408. Status status = stages.optimizer.SetOptions(options_);
  409. if (status != SUCCESS) {
  410. GELOGE(status, "Graph optimizer set options failed.");
  411. return status;
  412. }
  413. stages.builder.SetOptions(options_);
  414. return SUCCESS;
  415. }
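  // Add a graph under graph_id. Safe for concurrent callers with the same id: the first caller
  // creates the graph node while later callers either reuse it or wait until adding is done.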
  416. Status GraphManager::AddGraph(const GraphId &graph_id, const Graph &graph,
  417. const std::map<std::string, std::string> &options,
  418. const OmgContext &omg_context) {
  419. IncreaseGraphCount(graph_id);
  420. // validation for adding graphs with the same graph_id in a multi-thread scenario
  421. // 1. a previous thread owning the same graph_id has already finished the AddGraph process
  422. if (GetAddGraphCondition(graph_id) == kDoneAdded) {
  423. GraphNodePtr graph_node;
  424. if (GetGraphNode(graph_id, graph_node) != SUCCESS) {
  425. GELOGE(GE_GRAPH_GRAPH_NOT_EXIST, "Graph not exist while done adding previously, graph_id = %u.", graph_id);
  426. return GE_GRAPH_GRAPH_NOT_EXIST;
  427. }
  428. graph_node->IncreaseLoadCount();
  429. return SUCCESS;
  430. }
  431. // In a multi-thread scenario, a former thread owning the same graph_id may still be
  432. // in the middle of the AddGraph process; the following threads have to wait until
  433. // that thread has finished adding the graph, to avoid repeatedly adding the same graph.
  434. bool is_added = false;
  435. if (CheckRepeatAdd(graph_id, is_added) != SUCCESS) {
  436. GELOGE(INTERNAL_ERROR, "CheckRepeatAdd for graph[id:%u] failed.", graph_id);
  437. return INTERNAL_ERROR;
  438. }
  439. // The former graph (from a different thread) with the same graph_id has been successfully added.
  440. if (is_added) {
  441. return SUCCESS;
  442. }
  443. // Do add graph
  444. SetAddGraphCondition(graph_id, kStartAdd);
  445. auto compute_graph = GraphUtils::GetComputeGraph(graph);
  446. GE_CHECK_NOTNULL(compute_graph);
  447. compute_graph->SetGraphID(graph_id);
  448. SetSessionGraphId(compute_graph, graph_id);
  449. if (CreateGraphNode(graph_id, graph, options) != SUCCESS) {
  450. GELOGE(FAILED, "Failed to create graph_node.");
  451. return FAILED;
  452. }
  453. AddLocalOmgContext(graph_id, omg_context);
  454. if (!options_.output_datatype.empty()) {
  455. GetLocalOmgContext().output_type = options_.output_datatype;
  456. }
  457. if (InitDynamicParams(compute_graph) != SUCCESS) {
  458. GELOGE(GRAPH_PARAM_INVALID, "Failed to init params when online infer is dynamic.");
  459. return GRAPH_PARAM_INVALID;
  460. }
  461. if (SetStagesOptions(graph_id, options_) != SUCCESS) {
  462. GELOGE(INTERNAL_ERROR, "Set stage options failed.");
  463. return INTERNAL_ERROR;
  464. }
  465. var_acc_ctrl_.AddGraph(graph_id, compute_graph);
  466. SetAddGraphCondition(graph_id, kDoneAdded);
  467. // Notify other threads that are waiting for the same graph to be added
  468. if (NotifyWaittingGraph(graph_id) != SUCCESS) {
  469. GELOGE(INTERNAL_ERROR, "NotifyWaittingGraph failed.");
  470. return INTERNAL_ERROR;
  471. }
  472. return SUCCESS;
  473. }
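  // Like AddGraph, but clones the compute graph first so the caller's graph object is left
  // untouched; rejects ids or graph objects that have already been added.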
  474. Status GraphManager::AddGraphWithCopy(const GraphId &graph_id, const Graph &graph,
  475. const std::map<std::string, std::string> &options,
  476. const OmgContext &omg_context) {
  477. if (HasGraphNode(graph_id)) {
  478. GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST, "[GraphManager] graph exists, graph_id = %u.", graph_id);
  479. return GE_GRAPH_GRAPH_ALREADY_EXIST;
  480. }
  481. auto compute_graph = GraphUtils::GetComputeGraph(graph);
  482. if (compute_graph != nullptr) {
  483. compute_graph->SetGraphID(graph_id);
  484. bool graph_has_been_added = false;
  485. if (AttrUtils::GetBool(*compute_graph, ATTR_NAME_GRAPH_HAS_BEEN_ADDED, graph_has_been_added)
  486. && graph_has_been_added) {
  487. GELOGE(GE_GRAPH_GRAPH_ALREADY_EXIST,
  488. "[GraphManager] same graph object can not be added again, graph_id = %u.", graph_id);
  489. return GE_GRAPH_GRAPH_ALREADY_EXIST;
  490. }
  491. } else {
  492. GELOGE(FAILED, "compute graph is null");
  493. return FAILED;
  494. }
  495. std::vector<NodePtr> input_nodes;
  496. std::vector<NodePtr> output_nodes;
  497. auto new_compute_graph = GraphUtils::CloneGraph(compute_graph, "", input_nodes, output_nodes);
  498. std::string session_graph_id;
  499. if (!AttrUtils::GetStr(*new_compute_graph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id) ||
  500. session_graph_id.empty()) {
  501. session_graph_id = "-1_" + to_string(graph_id);
  502. if (!AttrUtils::SetStr(*new_compute_graph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id)) {
  503. GELOGW("Set attribute of compute graph failed.");
  504. }
  505. for (auto &subgraph : new_compute_graph->GetAllSubgraphs()) {
  506. (void)AttrUtils::SetStr(*subgraph, ATTR_NAME_SESSION_GRAPH_ID, session_graph_id);
  507. }
  508. GELOGD("Get graph session_graph_id attr failed, set it to default value: [%s].", session_graph_id.c_str());
  509. }
  510. GraphNodePtr graph_node = MakeShared<ge::GraphNode>(graph_id);
  511. if (graph_node == nullptr) {
  512. GELOGE(FAILED, "GraphNode make shared failed");
  513. return FAILED;
  514. }
  515. std::shared_ptr<Graph> graph_ptr = GraphUtils::CreateGraphPtrFromComputeGraph(new_compute_graph);
  516. if (graph_ptr == nullptr) {
  517. GELOGE(FAILED, "GraphPtr make shared failed");
  518. return FAILED;
  519. }
  520. // update options related to graph tuning
  521. ParseOption(options, BUILD_MODE, options_.build_mode);
  522. ParseOption(options, BUILD_STEP, options_.build_step);
  523. ParseOption(options, TUNING_PATH, options_.tuning_path);
  524. graph_node->SetGraph(graph_ptr);
  525. graph_node->SetOptions(options);
  526. AddGraphNode(graph_id, graph_node);
  527. AddLocalOmgContext(graph_id, omg_context);
  528. if (!options_.output_datatype.empty()) {
  529. GetLocalOmgContext().output_type = options_.output_datatype;
  530. }
  531. CompilerStages &stages = GetCompilerStages(graph_id);
  532. stages.preparer.SetOptions(options_);
  533. Status status = stages.optimizer.SetOptions(options_);
  534. if (status != SUCCESS) {
  535. GELOGE(status, "Graph optimizer set options failed.");
  536. return status;
  537. }
  538. stages.builder.SetOptions(options_);
  539. var_acc_ctrl_.AddGraph(graph_id, new_compute_graph);
  540. return SUCCESS;
  541. }
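  // Merge the partitioned subgraphs back into one compute graph after subgraph optimization and
  // re-run topological sorting; if GELib is not initialized, fall back to the first subgraph.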
  542. Status GraphManager::MergeSubGraph(ComputeGraphPtr &compute_graph, const ge::ComputeGraphPtr &original_compute_graph,
  543. GraphId root_graph_id) {
  544. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  545. GraphPartitioner &partitioner = GetCompilerStages(root_graph_id).partitioner;
  546. if (instance_ptr != nullptr && instance_ptr->InitFlag()) {
  547. Status ret = partitioner.MergeAfterSubGraphOptimization(compute_graph, original_compute_graph);
  548. if (ret != SUCCESS) {
  549. GELOGE(ret, "Merge End and PlaceHolder nodes after subgraph optimization failed.");
  550. return FAILED;
  551. }
  552. Status ret_topo = compute_graph->TopologicalSorting();
  553. if (ret_topo != SUCCESS) {
  554. GELOGE(ret_topo, "[GraphManager]: TopologicalSorting the merged graph failed.");
  555. return ret_topo;
  556. }
  557. } else {
  558. auto subgraph_list = partitioner.GetSubGraphMap();
  559. if (subgraph_list.find(original_compute_graph) != subgraph_list.end() &&
  560. !subgraph_list[original_compute_graph].empty() && subgraph_list[original_compute_graph][0] != nullptr) {
  561. compute_graph = subgraph_list[original_compute_graph][0]->GetSubGraph();
  562. }
  563. }
  564. return SUCCESS;
  565. }
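  // Clone every partitioned subgraph into copy_graphs (keyed by graph name) and mark the
  // originals with ATTR_NAME_NEED_LX_FUSION, so ReplaceSubgraphWithOriGraph can restore them
  // if the fusion optimization does not succeed.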
  566. Status GraphManager::CopySubGraphAndMarkFusion(const ComputeGraphPtr &compute_graph,
  567. Graph2SubGraphInfoList &sub_graph_map,
  568. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs) {
  569. GE_CHECK_NOTNULL(compute_graph);
  570. vector<ComputeGraphPtr> old_compute_graphs;
  571. const auto &root_subgraph_list = sub_graph_map[compute_graph];
  572. for (const auto &subgraph : root_subgraph_list) {
  573. old_compute_graphs.emplace_back(subgraph->GetSubGraph());
  574. }
  575. for (const auto &function_graph : compute_graph->GetAllSubgraphs()) {
  576. const auto &subgraph_list = sub_graph_map[function_graph];
  577. for (const auto &subgraph : subgraph_list) {
  578. old_compute_graphs.emplace_back(subgraph->GetSubGraph());
  579. }
  580. }
  581. for (const auto &old_compute_graph : old_compute_graphs) {
  582. std::vector<NodePtr> input_nodes;
  583. std::vector<NodePtr> output_nodes;
  584. ComputeGraphPtr new_compute_graph = GraphUtils::CloneGraph(old_compute_graph, "", input_nodes, output_nodes);
  585. if (new_compute_graph == nullptr) {
  586. GELOGE(INTERNAL_ERROR, "Clone graph failed.");
  587. return INTERNAL_ERROR;
  588. }
  589. copy_graphs.emplace(old_compute_graph->GetName(), new_compute_graph);
  590. if (!AttrUtils::SetBool(old_compute_graph, ATTR_NAME_NEED_LX_FUSION, true)) {
  591. GELOGE(INTERNAL_ERROR, "Set attr lx_fusion to graph failed.");
  592. return INTERNAL_ERROR;
  593. }
  594. }
  595. GELOGI("Copy %zu graphs successfully.", copy_graphs.size());
  596. return SUCCESS;
  597. }
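  // Optimize all partitioned subgraphs in parallel on a thread pool (default 16 threads,
  // overridable via the THREAD_MULTI_NUM environment variable) and wait for every task to finish.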
  598. Status GraphManager::OptimizeSubGraphWithMultiThreads(ComputeGraphPtr compute_graph,
  599. Graph2SubGraphInfoList &sub_graph_map, uint64_t session_id) {
  600. GE_CHECK_NOTNULL(compute_graph);
  601. // default to 16 worker threads
  602. uint32_t thread_num = 16;
  603. char *env = std::getenv("THREAD_MULTI_NUM");
  604. if (env != nullptr) {
  605. thread_num = atoi(env);
  606. GEEVENT("OptimizeSubGraphWithMultiThreads thread num: %u", thread_num);
  607. }
  608. ThreadPool executor(thread_num);
  609. std::vector<std::future<Status>> vector_future;
  610. const auto &root_subgraph_list = sub_graph_map[compute_graph];
  611. std::string op_compile_strategy;
  612. (void)AttrUtils::GetStr(compute_graph, ATTR_NAME_OP_COMPILE_STRATEGY, op_compile_strategy);
  613. GELOGD("OptimizeSubGraphWithMultiThreads Process op_compile_strategy:%s", op_compile_strategy.c_str());
  614. for (const auto &subgraph : root_subgraph_list) {
  615. if (!op_compile_strategy.empty()) {
  616. (void) AttrUtils::SetStr(subgraph->GetSubGraph(), ATTR_NAME_OP_COMPILE_STRATEGY, op_compile_strategy);
  617. }
  618. std::future<Status> f = executor.commit(GraphManager::ProcessSubGraphWithMultiThreads, this,
  619. compute_graph->GetGraphID(), subgraph,
  620. compute_graph->GetName(), session_id,
  621. ErrorManager::GetInstance().GetErrorContext(),
  622. GetThreadLocalContext());
  623. if (!f.valid()) {
  624. GELOGE(FAILED, "Future is invalid");
  625. return FAILED;
  626. }
  627. vector_future.emplace_back(std::move(f));
  628. }
  629. for (auto &function_graph : compute_graph->GetAllSubgraphs()) {
  630. auto subgraph_list = sub_graph_map[function_graph];
  631. for (const auto &subgraph : subgraph_list) {
  632. if (!op_compile_strategy.empty()) {
  633. (void) AttrUtils::SetStr(subgraph->GetSubGraph(), ATTR_NAME_OP_COMPILE_STRATEGY, op_compile_strategy);
  634. }
  635. std::future<Status> f = executor.commit(GraphManager::ProcessSubGraphWithMultiThreads, this,
  636. compute_graph->GetGraphID(), subgraph,
  637. compute_graph->GetName(), session_id,
  638. ErrorManager::GetInstance().GetErrorContext(),
  639. GetThreadLocalContext());
  640. if (!f.valid()) {
  641. GELOGE(FAILED, "Future is invalid");
  642. return FAILED;
  643. }
  644. vector_future.emplace_back(std::move(f));
  645. }
  646. }
  647. GELOGD("Total number of subgraphs is %zu", vector_future.size());
  648. for (size_t i = 0; i < vector_future.size(); ++i) {
  649. Status ret_status = vector_future[i].get();
  650. if (ret_status != SUCCESS) {
  651. GELOGE(ret_status, "subgraph %zu optimize failed", i);
  652. return ret_status;
  653. }
  654. }
  655. return SUCCESS;
  656. }
  657. bool GraphManager::CheckAllFusionOptimizeSuccess(const ComputeGraphPtr &compute_graph,
  658. Graph2SubGraphInfoList &sub_graph_map) {
  659. if (compute_graph == nullptr) {
  660. GELOGE(PARAM_INVALID, "Input param compute_graph is nullptr.");
  661. return false;
  662. }
  663. /// 1. FE sets attr optimize_group to true (false) when lx fusion succeeds (fails);
  664. /// 2. FE does not set attr optimize_group when fe.ini disables l2fusion;
  665. /// 3. Other engines do not set attr optimize_group.
  666. const auto &root_subgraph_list = sub_graph_map[compute_graph];
  667. for (const auto &subgraph : root_subgraph_list) {
  668. bool optimize_group = true;
  669. (void) AttrUtils::GetBool(subgraph->GetSubGraph(), ATTR_NAME_OPTIMIZE_GROUP, optimize_group);
  670. if (!optimize_group) {
  671. GELOGW("Run lx optimize for subgraph:%s failed.", subgraph->GetSubGraph()->GetName().c_str());
  672. return false;
  673. }
  674. }
  675. for (auto &function_graph : compute_graph->GetAllSubgraphs()) {
  676. const auto &subgraph_list = sub_graph_map[function_graph];
  677. for (const auto &subgraph : subgraph_list) {
  678. bool optimize_group = true;
  679. (void) AttrUtils::GetBool(subgraph->GetSubGraph(), ATTR_NAME_OPTIMIZE_GROUP, optimize_group);
  680. if (!optimize_group) {
  681. GELOGW("Run lx optimize for subgraph:%s failed.", subgraph->GetSubGraph()->GetName().c_str());
  682. return false;
  683. }
  684. }
  685. }
  686. GELOGI("All subgraphs were optimized successfully, no need to reuse buffer optimize.");
  687. return true;
  688. }
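  // Roll back the subgraphs: swap each one for the copy saved by CopySubGraphAndMarkFusion.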
  689. Status GraphManager::ReplaceSubgraphWithOriGraph(const ComputeGraphPtr &compute_graph,
  690. Graph2SubGraphInfoList &sub_graph_map,
  691. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs) {
  692. GE_CHECK_NOTNULL(compute_graph);
  693. const auto &root_subgraph_list = sub_graph_map[compute_graph];
  694. for (const auto &subgraph : root_subgraph_list) {
  695. auto iter = copy_graphs.find(subgraph->GetSubGraph()->GetName());
  696. if (iter == copy_graphs.end()) {
  697. GELOGE(FAILED, "Can not find subgraph:%s in copy graphs.", subgraph->GetSubGraph()->GetName().c_str());
  698. return FAILED;
  699. }
  700. subgraph->SetSubGraph(iter->second);
  701. }
  702. for (auto &function_graph : compute_graph->GetAllSubgraphs()) {
  703. const auto &subgraph_list = sub_graph_map[function_graph];
  704. for (const auto &subgraph : subgraph_list) {
  705. auto iter = copy_graphs.find(subgraph->GetSubGraph()->GetName());
  706. if (iter == copy_graphs.end()) {
  707. GELOGE(FAILED, "Can not find subgraph:%s in copy graphs.", subgraph->GetSubGraph()->GetName().c_str());
  708. return FAILED;
  709. }
  710. subgraph->SetSubGraph(iter->second);
  711. }
  712. }
  713. GELOGI("All subgraphs are successfully replaced.");
  714. return SUCCESS;
  715. }
  716. Status GraphManager::SetSubgraph(uint64_t session_id, ComputeGraphPtr compute_graph, GraphPartitioner &partitioner) {
  717. GE_CHECK_NOTNULL(compute_graph);
  718. auto sub_graph_map = partitioner.GetSubGraphMap();
  719. GELOGD("Directly optimize subgraph with build mode:%s, and step:%s.",
  720. options_.build_mode.c_str(),
  721. options_.build_step.c_str());
  722. Status ret = OptimizeSubGraphWithMultiThreads(compute_graph, sub_graph_map, session_id);
  723. if (ret != SUCCESS) {
  724. GELOGE(ret, "Multi-threaded subgraph optimization failed.");
  725. return ret;
  726. }
  727. return SUCCESS;
  728. }
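  // Helper macro: run one PreRun step (with perf tracing), dump the graph after it and log success.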
  729. #define GM_RUN_AND_DUMP_PERF(name, func, ...) \
  730. do { \
  731. GE_RUN_PERF(GraphManager, func, __VA_ARGS__); \
  732. GE_DUMP(compute_graph, "PreRunAfter" name); \
  733. GELOGI("Run %s on graph %s(%u) success.", name, compute_graph->GetName().c_str(), graph_node->GetGraphId()); \
  734. } while (0)
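  // Original-graph optimization stage of PreRun: quantize-related preparation, summary handling,
  // dynamic-shape preparation, original-graph optimization, running-format refinement,
  // subexpression migration and the stage-1 optimization passes.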
  735. Status GraphManager::PreRunOptimizeOriginalGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  736. ge::ComputeGraphPtr &compute_graph, uint64_t session_id) {
  737. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kPrepareOptimize);
  738. GE_CHECK_NOTNULL(graph_node);
  739. GE_CHECK_NOTNULL(compute_graph);
  740. CompilerStages &stages = GetCompilerStages(graph_node->GetGraphId());
  741. GM_RUN_AND_DUMP_PERF("OptimizeGraphPrepare", stages.optimizer.OptimizeOriginalGraphForQuantize, compute_graph);
  742. GM_RUN_AND_DUMP_PERF("HandleSummaryOp", stages.optimizer.HandleSummaryOp, compute_graph);
  743. GM_RUN_AND_DUMP_PERF("Prepare", stages.preparer.PrepareDynShape, graph_node, inputs, compute_graph,
  744. session_id);
  745. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOriginOptimize);
  746. GM_RUN_AND_DUMP_PERF("OptimizeOriginalGraph", stages.optimizer.OptimizeOriginalGraph, compute_graph);
  747. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kPrepareOptimize);
  748. GM_RUN_AND_DUMP_PERF("PrepareRunningFormatRefiner", stages.preparer.PrepareRunningFormatRefiner);
  749. GM_RUN_AND_DUMP_PERF("RefineRunningFormat", stages.optimizer.OptimizeOriginalGraphJudgeInsert, compute_graph);
  750. GM_RUN_AND_DUMP_PERF("SubexpressionMigration", SubexpressionMigration, compute_graph);
  751. GE_RUN(GraphManager, stages.preparer.RecordAIPPInfo, compute_graph);
  752. if (IsTailingOptimization()) {
  753. GM_RUN_AND_DUMP_PERF("OptimizeSwitchOp", stages.preparer.SwitchOpOptimize, compute_graph);
  754. }
  755. GM_RUN_AND_DUMP_PERF("Optimize1", OptimizeStage1, compute_graph);
  756. GM_RUN_AND_DUMP_PERF("InferShape2", compute_graph->InferShapeInNeed);
  757. PassManager graph_pass;
  758. GE_CHK_STATUS_RET(graph_pass.AddPass("PreRun::CtrlEdgeTransferPass", new (std::nothrow) CtrlEdgeTransferPass))
  759. GE_CHK_STATUS_RET(graph_pass.Run(compute_graph));
  760. GE_CHK_STATUS_RET(stages.optimizer.IdentifyReference(compute_graph), "Identify reference failed.");
  761. GELOGD("PreRun:PreRunOptimizeOriginalGraph success.");
  762. return SUCCESS;
  763. }
  764. Status GraphManager::PreRunOptimizeSubGraph(const GraphNodePtr &graph_node,
  765. ge::ComputeGraphPtr &compute_graph,
  766. uint64_t session_id) {
  767. GE_CHECK_NOTNULL(graph_node);
  768. GE_CHECK_NOTNULL(compute_graph);
  769. GM_RUN_AND_DUMP_PERF("OptimizeSubgraph", OptimizeSubgraph, graph_node, compute_graph, session_id);
  770. // Dump graph to tuning path
  771. if (options_.build_mode == BUILD_MODE_TUNING && options_.build_step == BUILD_STEP_AFTER_UB_MATCH) {
  772. std::string tuning_path;
  773. (void) GetContext().GetOption(TUNING_PATH, tuning_path);
  774. GELOGD("Dump path:%s.", tuning_path.c_str());
  775. GraphUtils::DumpGEGraph(compute_graph, "", true, tuning_path);
  776. }
  777. GELOGD("PreRun:PreRunOptimizeSubGraph success.");
  778. return SUCCESS;
  779. }
  780. Status GraphManager::PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph,
  781. GeRootModelPtr &ge_root_model, uint64_t session_id) {
  782. GE_CHECK_NOTNULL(graph_node);
  783. GE_CHECK_NOTNULL(compute_graph);
  784. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kMergeGraphOptimize);
  785. CompilerStages &stages = GetCompilerStages(graph_node->GetGraphId());
  786. GM_RUN_AND_DUMP_PERF("OptimizeWholeGraph", stages.optimizer.OptimizeWholeGraph, compute_graph);
  787. GM_RUN_AND_DUMP_PERF("Optimize2", OptimizeStage2, compute_graph);
  788. GM_RUN_AND_DUMP_PERF("OptimizeGraphBeforeBuildForRts",
  789. GetCompilerStages(graph_node->GetGraphId()).optimizer.OptimizeGraphBeforeBuildForRts,
  790. compute_graph);
  791. Status ret = compute_graph->TopologicalSorting();
  792. if (ret != SUCCESS) {
  793. GELOGE(ret, "Graph topological sort failed, ret:%d.", ret);
  794. return ret;
  795. }
  796. GM_RUN_AND_DUMP_PERF("Build", Build, graph_node, compute_graph, ge_root_model, session_id);
  797. GELOGD("PreRun:PreRunAfterOptimizeSubGraph success.");
  798. return SUCCESS;
  799. }
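  // Create a runtime context for the current device, make it current, and register it with
  // RtContextUtil under the given session and graph id.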
  800. Status GraphManager::SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id) {
  801. GELOGD("set rt_context: session id: %lu, graph id: %u, mode %d, device id:%u.",
  802. session_id, graph_id, static_cast<int>(mode), ge::GetContext().DeviceId());
  803. rtError_t rt_ret = rtCtxCreate(&rt_context, mode, ge::GetContext().DeviceId());
  804. if (rt_ret != RT_ERROR_NONE) {
  805. GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  806. return FAILED;
  807. }
  808. rt_ret = rtCtxSetCurrent(rt_context);
  809. if (rt_ret != RT_ERROR_NONE) {
  810. GELOGE(FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  811. return FAILED;
  812. }
  813. RtContextUtil::GetInstance().AddRtContext(session_id, graph_id, rt_context);
  814. return SUCCESS;
  815. }
  816. Status GraphManager::RunCustomPass(const GraphNodePtr &graph_node) {
  817. ConstGraphPtr const_graph = graph_node->GetGraph();
  818. auto comp_graph = GraphUtils::GetComputeGraph(*const_graph);
  819. GE_DUMP(comp_graph, "RunCustomPassBegin");
  820. GE_TIMESTAMP_START(RunCustomPass);
  821. GraphPtr graph = std::const_pointer_cast<Graph>(const_graph);
  822. GE_CHK_STATUS_RET(CustomPassHelper::Instance().Run(graph), "Graph[%s] run custom pass fail.",
  823. comp_graph->GetName().c_str());
  824. GE_TIMESTAMP_END(RunCustomPass, "GraphBuilder::RunCustomPass");
  825. return SUCCESS;
  826. }
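  // PreRun pipeline: run custom passes, set up the runtime context, then, depending on the
  // tuning build mode/step, optimize the original graph, optimize the partitioned subgraphs,
  // build the model, and finally save the build cache when incremental build is enabled.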
  827. Status GraphManager::PreRun(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  828. GeRootModelPtr &ge_root_model, uint64_t session_id) {
  829. GE_CHECK_NOTNULL(graph_node);
  830. GE_CHECK_NOTNULL(graph_node->GetGraph());
  831. GE_CHK_STATUS_RET_NOLOG(RunCustomPass(graph_node));
  832. auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph());
  833. GE_CHECK_NOTNULL(compute_graph);
  834. compute_graph->SetSessionID(session_id);
  835. auto analyzer_instance = Analyzer::GetInstance();
  836. GE_CHK_STATUS_RET(analyzer_instance->BuildJsonObject(session_id, compute_graph->GetGraphID()),
  837. "BuildJsonObject Failed")
  838. GEEVENT("PreRun start: graph node size %zu, session id %lu, graph id %u, graph name %s",
  839. compute_graph->GetDirectNodesSize(), session_id, compute_graph->GetGraphID(),
  840. compute_graph->GetName().c_str());
  841. GE_DUMP(compute_graph, "PreRunBegin");
842. // create rt context in generate mode and bind it to this session/graph
  843. Status ret = SetRtContext(rtContext_t(), RT_CTX_GEN_MODE, session_id, compute_graph->GetGraphID());
  844. if (ret != SUCCESS) {
  845. GELOGE(ret, "Set rt context failed.");
  846. return ret;
  847. }
848. /// 1. BUILD_MODE_TUNING with BUILD_STEP_AFTER_UB_MATCH does not need PreRunOptimizeOriginalGraph;
849. /// 2. BUILD_MODE_TUNING with BUILD_STEP_AFTER_MERGE does not need PreRunOptimizeOriginalGraph;
850. /// 3. BUILD_MODE_TUNING with BUILD_STEP_AFTER_BUILDER_SUB does not need PreRunOptimizeOriginalGraph.
  851. bool run_optimize_original_graph = !((options_.build_mode == BUILD_MODE_TUNING) &&
  852. (options_.build_step == BUILD_STEP_AFTER_UB_MATCH ||
  853. options_.build_step == BUILD_STEP_AFTER_MERGE ||
  854. options_.build_step == BUILD_STEP_AFTER_BUILDER_SUB));
  855. if (run_optimize_original_graph) {
  856. Status ret = PreRunOptimizeOriginalGraph(graph_node, inputs, compute_graph, session_id);
  857. if (ret != SUCCESS) {
  858. GELOGE(ret, "Run PreRunOptimizeOriginalGraph failed for graph:%s", compute_graph->GetName().c_str());
  859. return ret;
  860. }
  861. }
  862. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kPrepareOptimize);
  863. ret = PreRunOptimizeSubGraph(graph_node, compute_graph, session_id);
  864. if (ret != SUCCESS) {
  865. GELOGE(ret, "Run PreRunOptimizeSubGraph failed for graph:%s.", compute_graph->GetName().c_str());
  866. return ret;
  867. }
868. /// 1. BUILD_MODE_TUNING with BUILD_STEP_BEFORE_UB_MATCH does not need PreRunAfterOptimizeSubGraph;
869. /// 2. BUILD_MODE_TUNING with BUILD_STEP_AFTER_BUILDER does not need PreRunAfterOptimizeSubGraph;
870. /// 3. BUILD_MODE_TUNING with BUILD_STEP_AFTER_BUILDER_SUB does not need PreRunAfterOptimizeSubGraph.
  871. bool run_after_optimize_subgraph = !((options_.build_mode == BUILD_MODE_TUNING) &&
  872. (options_.build_step == BUILD_STEP_BEFORE_UB_MATCH ||
  873. options_.build_step == BUILD_STEP_AFTER_BUILDER ||
  874. options_.build_step == BUILD_STEP_AFTER_BUILDER_SUB));
  875. if (run_after_optimize_subgraph) {
  876. Status ret = PreRunAfterOptimizeSubGraph(graph_node, compute_graph, ge_root_model, session_id);
  877. if (ret != SUCCESS) {
  878. GELOGE(ret, "Run PreRunAfterOptimizeSubGraph failed for graph:%s.", compute_graph->GetName().c_str());
  879. return ret;
  880. }
  881. }
  882. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
883. // when incremental build is enabled, save the om model and var manager
  884. GeModelPtr ge_model = nullptr;
  885. auto save_ret = SaveCacheAfterBuild(graph_node->GetGraphId(), compute_graph, ge_model);
  886. if (save_ret != SUCCESS) {
  887. GELOGW("Fail to save cache.");
  888. }
  889. GEEVENT("[GEPERFTRACE] GE PreRun End");
  890. return SUCCESS;
  891. }
  892. Status GraphManager::SubexpressionMigration(ComputeGraphPtr &compute_graph) {
  893. PassManager pass_manager;
  894. GE_CHK_STATUS_RET(pass_manager.AddPass("SubexpressionMigrationPass", new (std::nothrow) SubexpressionMigrationPass));
  895. GE_CHK_STATUS_RET(pass_manager.AddPass("UnusedArgsCleanPass", new (std::nothrow) UnusedArgsCleanPass));
  896. GE_TIMESTAMP_START(SubexpressionMigrationPass);
  897. auto ret = pass_manager.Run(compute_graph);
  898. GE_TIMESTAMP_END(SubexpressionMigrationPass, "GraphManager::SubexpressionMigration");
  899. if (ret != SUCCESS && ret != NOT_CHANGED) {
  900. GELOGE(ret, "Run SubexpressionMigrationPass failed, ret:%u.", ret);
  901. return ret;
  902. }
  903. return SUCCESS;
  904. }
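///
/// @ingroup ge_graph
/// @brief Build the graph via PreRun (or reuse an incremental-build model) when it has not been
///        built yet, then load it synchronously or asynchronously according to the graph node.
/// @return: Status
///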
  905. Status GraphManager::StartForRunGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  906. GeRootModelPtr &ge_root_model, uint64_t session_id) {
907. // graph preprocess, optimize, partition and build are not executed if the graph has already been built successfully.
  908. Status ret = SUCCESS;
  909. if (IsGraphNeedBuild(graph_node)) {
  910. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
  911. if (graph_node->GetBuildFlag()) {
  912. GELOGE(PARAM_INVALID,
  913. "The graph %u need to re-build, you should remove it from GE "
  914. "first, then AddGraph again and rebuild it.",
  915. graph_node->GetGraphId());
  916. return PARAM_INVALID;
  917. }
  918. GeModelPtr ge_model = nullptr;
919. // check whether incremental build is needed.
  920. ret = IncreBuild(graph_node, ge_model);
  921. if (ret != SUCCESS) {
  922. ret = PreRun(graph_node, inputs, ge_root_model, session_id);
  923. // release rts generate context
  924. RtContextUtil::GetInstance().DestroyRtContexts(session_id, graph_node->GetGraphId());
  925. if (ret != SUCCESS) {
  926. GELOGE(ret, "PreRun Failed. graph_id:%u.", graph_node->GetGraphId());
  927. return ret;
  928. }
  929. }
  930. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelLoad, ErrorMessage::kModelLoad);
  931. if (!graph_node->IsAsync()) {
  932. ret = LoadGraph(ge_root_model, graph_node);
  933. } else {
  934. GE_CHECK_NOTNULL(ge_root_model);
  935. ret = LoadGraphAsync(ge_root_model, graph_node);
  936. }
  937. if (ret != SUCCESS) {
  938. GELOGE(ret, "LoadGraph Failed.");
  939. return ret;
  940. }
  941. graph_node->SetBuildFlag(true);
  942. var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId());
  943. } else if (!graph_node->GetLoadFlag()) {
  944. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelLoad, ErrorMessage::kModelLoad);
  945. GeRootModelPtr ge_root_model_ptr = graph_node->GetGeRootModel();
  946. if (!graph_node->IsAsync()) {
  947. ret = LoadGraph(ge_root_model_ptr, graph_node);
  948. } else {
  949. GE_CHECK_NOTNULL(ge_root_model);
  950. ret = LoadGraphAsync(ge_root_model_ptr, graph_node);
  951. }
  952. if (ret != SUCCESS) {
  953. GELOGE(ret, "LoadGraph Failed.");
  954. return ret;
  955. }
  956. }
  957. return ret;
  958. }
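// Load the built root model online when run_graph_flag is set; for known-shape graphs without
// GE_USE_STATIC_MEMORY, memory held by previously loaded models may be released first.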
  959. Status GraphManager::LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
  960. GELOGI("[LoadGraph] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId());
  961. if (options_.run_graph_flag && ge_root_model != nullptr) {
  962. ge_root_model->SetTrainFlag(GetTrainFlag());
963. // run graph synchronously with the model
  964. std::shared_ptr<GraphModelListener> model_listener = GetModelListener();
  965. ModelIdInfo model_id_info;
  966. bool is_unknown_shape = false;
  967. GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape));
  968. if (!is_unknown_shape) {
  969. if (getenv(kEnvGeuseStaticMemory) != nullptr) {
  970. GELOGI("[LoadGraph] GE_USE_STATIC_MEMORY is seted.");
  971. } else {
  972. auto root_graph = ge_root_model->GetRootGraph();
  973. GE_CHECK_NOTNULL(root_graph);
  974. auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel();
  975. GeModelPtr ge_model = name_to_model[root_graph->GetName()];
  976. GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node));
  977. }
  978. }
  979. GE_TIMESTAMP_START(LoadGraph);
  980. Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, model_listener);
  981. GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraph");
  982. if (ret != SUCCESS) {
  983. GELOGE(ret, "[StartForRunGraph] LoadGraph Failed");
  984. graph_node->SetRunFlag(false);
  985. return ret;
  986. }
  987. graph_node->SetLoadFlag(true);
  988. ge_root_model->SetModelId(model_id_info.model_id);
  989. graph_node->SetGeRootModel(ge_root_model);
  990. }
  991. return SUCCESS;
  992. }
  993. Status GraphManager::LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper,
  994. GeModelPtr &ge_model) {
  995. auto graph_id = graph_node->GetGraphId();
  996. auto ret = cache_helper->LoadOmModelFromCache(ge_model);
  997. if (ret != SUCCESS) {
  998. GELOGW("Fail to load om model from cache.");
  999. if (cache_helper->ClearCache(graph_id) != SUCCESS) {
  1000. GELOGW("Fail to clear cache of graph %u.", graph_id);
  1001. }
  1002. return FAILED;
  1003. }
  1004. ret = cache_helper->RecoverVarManagerFromCache();
  1005. if (ret != SUCCESS) {
  1006. GELOGW("Fail to recover VarManager from cache.");
  1007. if (cache_helper->ClearCache(graph_id) != SUCCESS) {
  1008. GELOGW("Fail to clear cache of graph %u.", graph_id);
  1009. }
  1010. return FAILED;
  1011. }
  1012. ComputeGraphPtr compute_graph_in_model = GraphUtils::GetComputeGraph(ge_model->GetGraph());
  1013. if (compute_graph_in_model == nullptr) {
  1014. GELOGW("Error occurred when get compute graph from om, abandon.");
  1015. return FAILED;
  1016. } else {
  1017. graph_node->SetComputeGraph(compute_graph_in_model);
  1018. graph_node->SetGeModel(ge_model);
  1019. GELOGI("Load model and graph form cache om file.");
  1020. }
  1021. return SUCCESS;
  1022. }
  1023. Status GraphManager::SaveCacheBeforeBuild(uint32_t graph_id, const ModelCacheHelperPtr &cache_helper) {
  1024. auto ret = cache_helper->SaveCacheInfoToCache();
  1025. if (ret != SUCCESS) {
  1026. GELOGW("Fail to save cache info of graph[%d] to cache.", graph_id);
  1027. return FAILED;
  1028. }
  1029. ret = cache_helper->SaveVarManagerToCache(true);
  1030. if (ret != SUCCESS) {
  1031. GELOGW("Fail to save var manager to cache.");
  1032. cache_helper->ClearCache(graph_id);
  1033. return FAILED;
  1034. }
  1035. GELOGI("Cache files have been saved.");
  1036. return SUCCESS;
  1037. }
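// For incremental build, refresh the cache helper's compute graph and save the VarManager and om
// model to cache; any failure clears the cache for this graph.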
  1038. Status GraphManager::SaveCacheAfterBuild(uint32_t graph_id, ge::ComputeGraphPtr graph, GeModelPtr &ge_model) {
  1039. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  1040. if ((instance_ptr == nullptr) || !instance_ptr->InitFlag()) {
  1041. GELOGW("GELib not initialized.");
  1042. return FAILED;
  1043. }
  1044. if (instance_ptr->IsIncreBuild()) {
  1045. std::lock_guard<std::mutex> lock(member_mutex_);
  1046. auto iter = cache_helper_map_.find(graph_id);
  1047. if (iter == cache_helper_map_.end()) {
  1048. GELOGW("Can not find ModelCacheHelper of graph[%u]", graph_id);
  1049. return FAILED;
  1050. } else {
  1051. ModelCacheHelperPtr cache_helper = iter->second;
  1052. auto ret = cache_helper->RefreshComputeGraph(graph);
  1053. if (ret != SUCCESS) {
  1054. cache_helper->ClearCache(graph_id);
  1055. GELOGW("Fail to refresh cache helper's compute graph");
  1056. return FAILED;
  1057. }
  1058. ret = cache_helper->SaveVarManagerToCache(false);
  1059. if (ret != SUCCESS) {
  1060. cache_helper->ClearCache(graph_id);
  1061. GELOGW("Fail to save VarManager to cache");
  1062. return FAILED;
  1063. }
  1064. ret = cache_helper->SaveOmModelToCache(ge_model);
  1065. if (ret != SUCCESS) {
  1066. cache_helper->ClearCache(graph_id);
  1067. GELOGW("Fail to save om model to cache");
  1068. return FAILED;
  1069. }
  1070. }
  1071. }
  1072. return SUCCESS;
  1073. }
  1074. Status GraphManager::InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id,
  1075. const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs) {
  1076. Status ret = graph_executor_.SetCondition(&sync_run_mutex_, &condition_, graph_run_listener_);
  1077. if (ret != SUCCESS) {
  1078. GELOGE(GE_GRAPH_RUNGRAPH_FAILED, "[RunGraph] set condition failed, graph_id = %u.", graph_id);
  1079. graph_node->SetRunFlag(false);
  1080. return GE_GRAPH_RUNGRAPH_FAILED;
  1081. }
  1082. if (GetTrainFlag()) {
  1083. GE_CHK_STATUS_RET(graph_executor_.SetGraphContext(GetGraphContext()));
  1084. graph_executor_.SetTrainFlag(options_.train_graph_flag);
  1085. }
  1086. ret = graph_executor_.ExecuteGraph(graph_id, graph_node->GetGeRootModel(), inputs, outputs);
  1087. graph_node->SetRunFlag(false);
  1088. if (ret != SUCCESS) {
  1089. GELOGE(ret, "[RunGraph] execute graph failed, graph_id = %u.", graph_id);
  1090. return ret;
  1091. }
  1092. return SUCCESS;
  1093. }
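///
/// @ingroup ge_graph
/// @brief Synchronous run entry: look up the graph node, build/load it if needed, execute it,
///        then handle summary and checkpoint outputs for training graphs.
/// @return: Status
///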
  1094. Status GraphManager::RunGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  1095. std::vector<GeTensor> &outputs, uint64_t session_id) {
  1096. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
  1097. std::lock_guard<std::mutex> lock(run_mutex_);
  1098. GELOGI("[RunGraph] start to run graph, graph_id = %u, is_train_graph: %d", graph_id, GetTrainFlag());
  1099. if (inputs.empty()) {
  1100. GELOGI("[RunGraph] initialize sub graph has no inputs");
  1101. }
  1102. // find graph
  1103. GraphNodePtr graph_node = nullptr;
  1104. Status ret = GetGraphNode(graph_id, graph_node);
  1105. if (ret != SUCCESS) {
  1106. GELOGE(ret, "[RunGraph] graph not exist, graph_id = %u.", graph_id);
  1107. return ret;
  1108. }
  1109. if (graph_node == nullptr) {
  1110. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[RunGraph] graph node is NULL, graph_id = %u.", graph_id);
  1111. return GE_GRAPH_GRAPH_NODE_NULL;
  1112. }
  1113. if (graph_node->GetRunFlag()) {
  1114. GELOGE(GE_GRAPH_ALREADY_RUNNING, "[RunGraph] graph already running, graph id = %u", graph_id);
  1115. return GE_GRAPH_ALREADY_RUNNING;
  1116. }
  1117. UpdateLocalOmgContext(graph_id);
  1118. // set graph's run flag
  1119. graph_node->SetRunFlag(true);
  1120. ComputeGraphPtr compute_graph_tmp = GraphUtils::GetComputeGraph(*(graph_node->GetGraph()));
  1121. GE_IF_BOOL_EXEC(GetTrainFlag(),
  1122. GE_IF_BOOL_EXEC(compute_graph_tmp == nullptr,
  1123. GELOGE(GE_GRAPH_GRAPH_NODE_NULL,
  1124. "[RunGraph] compute_graph_tmp is NULL, graph id = %u.", graph_id);
  1125. return GE_GRAPH_GRAPH_NODE_NULL;))
1126. // when incremental build is enabled, add a cache helper to the map
  1127. AddModelCacheHelperToMap(graph_id, session_id, compute_graph_tmp);
  1128. if (options_.local_fmk_op_flag) {
  1129. GetCompilerStages(graph_id).optimizer.TranFrameOp(compute_graph_tmp);
  1130. }
  1131. GeRootModelPtr ge_root_model = nullptr;
  1132. ret = StartForRunGraph(graph_node, inputs, ge_root_model, session_id);
  1133. if (ret != SUCCESS) {
  1134. GELOGE(ret, "[RunGraph] StartForRunGraph failed!");
  1135. graph_node->SetRunFlag(false);
  1136. return ret;
  1137. }
  1138. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelExecute, ErrorMessage::kModelExecute);
1139. // execute graph
  1140. ret = InnerRunGraph(graph_node, graph_id, inputs, outputs);
  1141. if (ret != SUCCESS) {
  1142. return ret;
  1143. }
  1144. if (GetTrainFlag()) {
  1145. if (compute_graph_tmp->IsSummaryGraph()) {
  1146. ret = SummaryHandle(graph_id, outputs);
  1147. if (ret != SUCCESS) {
  1148. GELOGE(ret, "[RunGraph] SummaryHandle failed!");
  1149. }
  1150. }
  1151. GeRootModelPtr root_model = graph_node->GetGeRootModel();
  1152. if (root_model != nullptr) {
  1153. GELOGI("Start CheckpointHandle.");
  1154. auto checkPointGraph = root_model->GetRootGraph();
  1155. if (IsCheckpointGraph(checkPointGraph)) {
  1156. ret = CheckpointHandle(graph_id, checkPointGraph, outputs);
  1157. if (ret != SUCCESS) {
  1158. GELOGE(ret, "[RunGraph] CheckpointHandle failed!");
  1159. }
  1160. }
  1161. }
  1162. }
  1163. GELOGI("[RunGraph] run graph success, graph_id = %u.", graph_id);
  1164. return SUCCESS;
  1165. }
  1166. Status GraphManager::GenerateInfershapeGraph(GraphId &graph_id) {
  1167. GELOGI("[DumpInfershapeJson] start to DumpInfershapeJson graph, graph_id=%u.", graph_id);
  1168. // find graph
  1169. GraphNodePtr graph_node = nullptr;
  1170. Status ret = GetGraphNode(graph_id, graph_node);
  1171. if (ret != SUCCESS) {
  1172. GELOGE(ret, "[BuildGraph] graph not exist, graph_id = %u.", graph_id);
  1173. return ret;
  1174. }
  1175. if (graph_node == nullptr) {
  1176. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[BuildGraph] graph node is NULL, graphId = %u.", graph_id);
  1177. return GE_GRAPH_GRAPH_NODE_NULL;
  1178. }
  1179. UpdateLocalOmgContext(graph_id);
  1180. ret = GetCompilerStages(graph_id).preparer.GenerateInfershapeGraph(graph_node->GetGraph());
  1181. if (ret != SUCCESS) {
  1182. GELOGE(ret, "ATC dump infershape json failed");
  1183. return ret;
  1184. }
  1185. GELOGI("[DumpInfershapeJson] Dump infershape json success, graph_id=%u.", graph_id);
  1186. return ret;
  1187. }
  1188. Status GraphManager::BuildGraphForUnregisteredOp(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  1189. GeRootModelPtr &ge_root_model, uint64_t session_id) {
  1190. // find graph
  1191. GraphNodePtr graph_node = nullptr;
  1192. Status ret = GetGraphNode(graph_id, graph_node);
  1193. if (ret != SUCCESS) {
  1194. GELOGE(ret, "[BuildGraph] graph not exist, graph_id = %u.", graph_id);
  1195. return ret;
  1196. }
  1197. if (graph_node == nullptr) {
  1198. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[BuildGraph] graph node is NULL, graphId = %u.", graph_id);
  1199. return GE_GRAPH_GRAPH_NODE_NULL;
  1200. }
  1201. UpdateLocalOmgContext(graph_id);
  1202. auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph());
  1203. GE_CHECK_NOTNULL(compute_graph);
  1204. GM_RUN_AND_DUMP_PERF("Prepare", GetCompilerStages(graph_id).preparer.PrepareDynShape, graph_node, inputs,
  1205. compute_graph, session_id);
  1206. for (auto &node : compute_graph->GetAllNodes()) {
  1207. OpDescPtr op_desc = node->GetOpDesc();
  1208. GE_CHECK_NOTNULL(op_desc);
  1209. if (op_desc->HasAttr(ATTR_NAME_UNREGST_OPPATH)) {
  1210. vector<ge::NodePtr> node_vec = {node};
  1211. auto instance_ptr = ge::GELib::GetInstance();
  1212. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  1213. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GE is not initialized");
  1214. return GE_CLI_GE_NOT_INITIALIZED;
  1215. }
  1216. OpsKernelInfoStorePtr kernel_info =
  1217. instance_ptr->OpsKernelManagerObj().GetOpsKernelInfoStore(op_desc->GetOpKernelLibName());
  1218. if (kernel_info == nullptr) {
  1219. GELOGE(FAILED, "Get op kernel info store failed");
  1220. return FAILED;
  1221. }
  1222. ret = kernel_info->CompileOp(node_vec);
  1223. if (ret != SUCCESS) {
  1224. GELOGE(ret, "Compile op failed, op = %s, graph_id = %u.", op_desc->GetName().c_str(), graph_id);
  1225. return ret;
  1226. }
  1227. }
  1228. }
  1229. GM_RUN_AND_DUMP_PERF("Build", Build, graph_node, compute_graph, ge_root_model, session_id);
  1230. return SUCCESS;
  1231. }
  1232. Status GraphManager::BuildGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  1233. GeRootModelPtr &ge_root_model, uint64_t session_id, bool async) {
  1234. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
  1235. GELOGD("[BuildGraph] start to build graph, graph_id:%u", graph_id);
  1236. if (inputs.empty()) {
  1237. GELOGW("[BuildGraph] BuildGraph warning: empty GeTensor inputs");
  1238. }
  1239. // find graph
  1240. GraphNodePtr graph_node = nullptr;
  1241. Status ret = GetGraphNode(graph_id, graph_node);
  1242. if (ret != SUCCESS) {
  1243. GELOGE(ret, "[BuildGraph] graph not exist, graph_id = %u.", graph_id);
  1244. return ret;
  1245. }
  1246. if (graph_node == nullptr) {
  1247. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[BuildGraph] graph node is NULL, graphId = %u.", graph_id);
  1248. return GE_GRAPH_GRAPH_NODE_NULL;
  1249. }
  1250. if (graph_node->GetRunFlag()) {
  1251. GELOGE(GE_GRAPH_ALREADY_RUNNING, "[BuildGraph] graph already running, graph id = %u", graph_node->GetGraphId());
  1252. return GE_GRAPH_ALREADY_RUNNING;
  1253. }
  1254. UpdateLocalOmgContext(graph_id);
  1255. graph_node->SetAsync(async);
  1256. // set graph's run flag
  1257. graph_node->SetRunFlag(true);
  1258. ret = StartForRunGraph(graph_node, inputs, ge_root_model, session_id);
  1259. graph_node->SetRunFlag(false);
  1260. if (ret != SUCCESS) {
  1261. GELOGE(GE_GRAPH_PRERUN_FAILED, "[BuildGraph] StartForRunGraph failed! graph_id:%u.", graph_id);
  1262. return GE_GRAPH_PRERUN_FAILED;
  1263. }
  1264. GELOGI("[BuildGraph] build graph success, graph_id=%u.", graph_id);
  1265. return ret;
  1266. }
  1267. ///
  1268. /// @ingroup ge_graph
  1269. /// @brief Save extra attribute to Model
1270. /// @param [in] model: Model that the attributes will be saved to.
  1271. /// @param [in] type: type of OpDesc.
  1272. /// @param [in] attrs: attributes of OpDesc.
  1273. /// @param [in] inputs: inputs tensor.
  1274. /// @param [in] outputs: outputs tensor.
  1275. /// @return: Status
  1276. ///
  1277. Status GraphManager::SaveParams(ge::GeModel &model, const std::string &type, const std::map<string, GeAttrValue> &attrs,
  1278. const std::vector<GeTensor> &inputs, const std::vector<GeTensor> &outputs) {
  1279. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetStr(&model, "ATTR_MODEL_OP_TYPE", type), return FAILED, "Set Op[%s] type fail",
  1280. type.c_str());
  1281. for (const auto &it : attrs) {
  1282. GE_CHK_BOOL_EXEC(model.SetAttr("ATTR_MODEL_" + it.first, it.second) == GRAPH_SUCCESS, return FAILED,
  1283. "Set OpDesc attribute[%s] fail", it.first.c_str());
  1284. }
  1285. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListTensor(&model, "ATTR_MODEL_TENSOR_INPUTS", inputs), return FAILED,
  1286. "Set Inputs tensor list fail");
  1287. GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListTensor(&model, "ATTR_MODEL_TENSOR_OUTPUTS", outputs), return FAILED,
  1288. "Set Outputs tensor list fail");
  1289. return SUCCESS;
  1290. }
  1291. void GraphManager::RemoveModelCacheHelper(const GraphId &graph_id) {
  1292. std::lock_guard<std::mutex> lock(member_mutex_);
  1293. auto iter = cache_helper_map_.find(graph_id);
  1294. if (iter != cache_helper_map_.end()) {
  1295. cache_helper_map_.erase(iter);
  1296. } else {
  1297. GELOGW("[GraphManager] cache helper does not exist, graph_id = %u", graph_id);
  1298. }
  1299. }
  1300. bool GraphManager::CheckModelLoad(const GeRootModelPtr &ge_root_model, bool load_flag) {
  1301. return ((ge_root_model != nullptr) && (ge_root_model->GetModelId() != INVALID_MODEL_ID) && load_flag);
  1302. }
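// Remove a graph and release its resources. A graph that is still running is only recorded in
// to_be_deleted_graphs_; otherwise its loaded models are unloaded and all bookkeeping is cleared.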
  1303. Status GraphManager::RemoveGraph(const GraphId &graph_id) {
  1304. auto it = to_be_deleted_graphs_.find(graph_id);
  1305. if (it != to_be_deleted_graphs_.end()) {
  1306. to_be_deleted_graphs_.erase(it);
  1307. }
  1308. GraphNodePtr graph_node = nullptr;
  1309. Status ret = GetGraphNode(graph_id, graph_node);
  1310. if (ret != SUCCESS || graph_node == nullptr) {
  1311. REPORT_INNER_ERROR("E19999", "Graph:%u not exist in graph_map, check invalid when GraphManager %s",
  1312. graph_id, __FUNCTION__);
  1313. GELOGE(GE_GRAPH_GRAPH_NOT_EXIST, "[GraphManager] Id %u does not exists.", graph_id);
  1314. return GE_GRAPH_GRAPH_NOT_EXIST;
  1315. }
  1316. if (graph_node->GetRunFlag()) {
1317. // only put the graph into the to-be-deleted list in exceptional scenarios
  1318. to_be_deleted_graphs_.insert(graph_id);
  1319. GELOGI("[GraphManager] Trying to remove running graph[Id:%u], added into to_be_deleted_graphs_.", graph_id);
  1320. return SUCCESS;
  1321. }
  1322. std::lock_guard<std::mutex> lock(unload_model_mutex_);
  1323. Status middle_ret;
  1324. rtError_t rt_ret;
  1325. var_acc_ctrl_.RemoveGraph(graph_id);
  1326. RemoveGraphNode(graph_id);
  1327. RemoveModelCacheHelper(graph_id);
  1328. auto ge_root_model = graph_node->GetGeRootModel();
  1329. if (CheckModelLoad(ge_root_model, graph_node->GetLoadFlag())) {
  1330. rt_ret = rtSetDevice(GetContext().DeviceId());
  1331. if (rt_ret != RT_ERROR_NONE) {
  1332. GELOGE(RT_FAILED, "[GraphManager:] rtSetDevice failed, modelId=%u, graphId=%u.", ge_root_model->GetModelId(),
  1333. graph_id);
  1334. return FAILED;
  1335. }
1336. // the same graph may be added several times and different models are created separately,
1337. // so unload them respectively.
  1338. middle_ret = UnloadModel(ge_root_model, graph_id);
  1339. if (middle_ret != SUCCESS) {
  1340. REPORT_INNER_ERROR("E19999", "UnloadModel for graph:%u failed, check unload detail in GraphLoader %s",
  1341. graph_id, __FUNCTION__);
  1342. GELOGE(middle_ret, "[GraphManager:] unload model failed, graph_id=%u.", graph_id);
  1343. ret = middle_ret;
  1344. }
  1345. rt_ret = rtDeviceReset(GetContext().DeviceId());
  1346. if (rt_ret != RT_ERROR_NONE) {
  1347. REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, graph_id:%u, when GraphManager %s",
  1348. GetContext().DeviceId(), graph_id, __FUNCTION__);
  1349. GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, graphId=%u.", graph_id);
  1350. ret = FAILED;
  1351. }
  1352. }
  1353. RemoveCompilerStages(graph_id);
  1354. RemoveGraphCount(graph_id);
  1355. RemoveAddGraphCondition(graph_id);
  1356. GE_CHK_STATUS_RET(ret, "[GraphManager:] Remove graph failed, graph_id=%u.", graph_id);
  1357. GELOGI("[GraphManager] remove graph success, graph_id=%u.", graph_id);
  1358. return SUCCESS;
  1359. }
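// Parse all graph manager options (stream config, perf level, encrypt files, build mode/step,
// run/train flags, dynamic dims and so on) into options_; invalid values return GE_GRAPH_OPTIONS_INVALID.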
  1360. Status GraphManager::ParseOptions(const std::map<std::string, std::string> &options) {
  1361. Status ret;
  1362. ParseOption(options, "ge.INPUT_NODES_SET_FP16", options_.input_nodes_set_fp16);
  1363. // parse streams max parallel num
  1364. ret = ParseOption(options, STREAM_MAX_PARALLEL_NUM, options_.stream_max_parallel_num);
  1365. if (ret != SUCCESS) {
  1366. GELOGE(GE_GRAPH_OPTIONS_INVALID,
  1367. "parse Key:%s value failed, it must be same format as "
  1368. "DNN_V100:2,DNN_HCCL:3",
  1369. STREAM_MAX_PARALLEL_NUM.c_str());
  1370. return GE_GRAPH_OPTIONS_INVALID;
  1371. }
  1372. // get stream num
  1373. ret = ParseOption(options, STREAM_NUM, options_.stream_num);
  1374. if ((ret != SUCCESS) || (options_.stream_num == 0)) {
  1375. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.stream_num, its value %d is invalid, must be not equal zero.",
  1376. options_.stream_num);
  1377. return GE_GRAPH_OPTIONS_INVALID;
  1378. }
1379. // get perf level; for valid values see enum PerfLevel
  1380. ret = ParseOption(options, PERF_LEVEL, options_.perf_level);
  1381. if ((ret != SUCCESS) || IsPerfLevelInvalid(options_.perf_level)) {
  1382. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.perfLevel, its value %d is invalid, must be enum PerfLevel type.",
  1383. options_.perf_level);
  1384. return GE_GRAPH_OPTIONS_INVALID;
  1385. }
  1386. // get encrypt mode
  1387. ret = ParseOption(options, ENCRYPT_MODE, options_.encrypt_mode);
  1388. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1389. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.encryptMode value invalid.");
  1390. return GE_GRAPH_OPTIONS_INVALID);
  1391. // get ek file
  1392. ParseOption(options, EK_FILE, options_.ek_file);
  1393. // get cert file
  1394. ParseOption(options, CERT_FILE, options_.cert_file);
  1395. // get hw key file
  1396. ParseOption(options, HW_KEY_FILE, options_.hw_key_file);
  1397. // get private file
  1398. ParseOption(options, PRIVATE_KEY_FILE, options_.private_key_file);
1399. // get framework type; for valid values see enum FrameworkType
  1400. ret = ParseOption(options, FRAMEWORK_TYPE, options_.framework_type);
  1401. if (ret != SUCCESS) {
  1402. // print error log in ParseOption
  1403. return GE_GRAPH_OPTIONS_INVALID;
  1404. }
  1405. // get calibration info file
  1406. ParseOption(options, CALIBRATION_CONF_FILE, options_.calibration_conf_file);
  1407. // get insert op info file
  1408. ParseOption(options, INSERT_OP_FILE, options_.insert_op_file);
  1409. // get output node name
  1410. ParseOption(options, OUTPUT_NODE_NAME, options_.output_node_name);
  1411. // get function bin path
  1412. ParseOption(options, "ge.func_bin_path", options_.func_bin_path);
  1413. // get core type
  1414. ParseOption(options, CORE_TYPE, options_.core_type);
  1415. // get weight compress flag
  1416. ret = ParseOption(options, COMPRESS_FLAG, options_.compress_flag);
  1417. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1418. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.compressFlag value is invalid, must be 0 or 1.");
  1419. return GE_GRAPH_OPTIONS_INVALID);
  1420. // Set Build model and step
  1421. ParseOption(options, BUILD_MODE, options_.build_mode);
  1422. ParseOption(options, BUILD_STEP, options_.build_step);
1423. ParseOption(options, TUNING_PATH, options_.tuning_path);
  1424. // ge.graphType.
  1425. options_.run_graph_flag = true;
  1426. ret = ParseOption(options, RUN_FLAG, options_.run_graph_flag);
  1427. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1428. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.runFlag value is invalid, must be 0 or 1.");
  1429. return GE_GRAPH_OPTIONS_INVALID);
  1430. // ge.graphType
  1431. ret = ParseTrainGraphFlag(options_.run_graph_flag, options_.train_graph_flag);
  1432. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1433. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.runFlag value is invalid");
  1434. return GE_GRAPH_OPTIONS_INVALID);
  1435. // parse FmkOp
  1436. options_.local_fmk_op_flag = false;
  1437. ret = ParseOption(options, LOCAL_FMKOP_FLAG, options_.local_fmk_op_flag);
  1438. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1439. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.localFmkopFlag value is invalid, must be 0 or 1.");
  1440. return GE_GRAPH_OPTIONS_INVALID);
  1441. options_.enable_print_op_pass = true;
  1442. ret = ParseOption(options, ENABLE_PRINT_OP_PASS, options_.enable_print_op_pass);
  1443. options_.is_single_op = false;
  1444. ret = ParseOption(options, SINGLE_OP_FLAG, options_.is_single_op);
  1445. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1446. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.enablePrintOpPass value is invalid, must be 0 or 1.");
  1447. return GE_GRAPH_OPTIONS_INVALID);
  1448. // parse hcom parallel
  1449. options_.hcom_parallel = false;
  1450. ret = ParseOption(options, HCOM_PARALLEL, options_.hcom_parallel);
  1451. GE_IF_BOOL_EXEC(ret != SUCCESS,
  1452. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:ge.hcomParallel value is invalid, must be 0 or 1.");
  1453. return GE_GRAPH_OPTIONS_INVALID);
  1454. // net output node dataType
  1455. ParseOption(options, OUTPUT_DATATYPE, options_.output_datatype);
  1456. // Set save_original_model flag (ge.save_original_model)
  1457. ParseOption(options, SAVE_ORIGINAL_MODEL, options_.save_original_model);
  1458. // Original model file name
  1459. ParseOption(options, ORIGINAL_MODEL_FILE, options_.original_model_file);
  1460. ParseOption(options, INPUT_SHAPE, options_.input_shape);
  1461. ParseOption(options, kDynamicDims, options_.dynamic_dims);
  1462. ParseOption(options, DYNAMIC_NODE_TYPE, options_.dynamic_node_type);
  1463. GELOGD("Dynamic dims params: input shape is %s, dynamic dims is %s, dynamic node type is %d",
  1464. options_.input_shape.c_str(), options_.dynamic_dims.c_str(), options_.dynamic_node_type);
  1465. return SUCCESS;
  1466. }
  1467. Status GraphManager::ParseTrainGraphFlag(bool &options, bool &option) {
  1468. std::shared_ptr<GELib> ge_instance_ptr = ge::GELib::GetInstance();
  1469. if (ge_instance_ptr == nullptr) {
  1470. GELOGW("[Initialize] set train_graph_flag to 0 when GE is not initialized or finalized");
  1471. option = false;
  1472. } else if (!ge_instance_ptr->isTrainMode()) {
  1473. option = false;
  1474. } else { // ge_instance_ptr->isTrainMode() is true
  1475. if (!options) {
  1476. GELOGE(GE_GRAPH_OPTIONS_INVALID,
  1477. "Key:ge.runFlag, its value %d is invalid, it must be 1 when GElib::is_train_mode_ flag is 1", options);
  1478. return GE_GRAPH_OPTIONS_INVALID;
  1479. }
  1480. option = true;
  1481. }
  1482. return SUCCESS;
  1483. }
  1484. bool GraphManager::IsPerfLevelInvalid(int32_t perf_level) {
  1485. return ((perf_level != static_cast<int32_t>(GEN_TASK_WITHOUT_L2FUSION)) &&
  1486. (perf_level != static_cast<int32_t>(GEN_TASK_WITHOUT_FUSION)) &&
  1487. (perf_level != -1));
  1488. }
  1489. void GraphManager::ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  1490. std::string &option) {
  1491. auto iter = options.find(key);
  1492. if (iter != options.end()) {
  1493. GELOGD("Set option %s from value %s to value%s", key.c_str(), option.c_str(), iter->second.c_str());
  1494. option = iter->second;
  1495. }
  1496. }
  1497. Status GraphManager::ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  1498. bool &option) {
  1499. auto iter = options.find(key);
  1500. if (iter != options.end()) {
  1501. string flag = iter->second;
  1502. if (flag == "0") {
  1503. option = false;
  1504. } else if (flag == "1") {
  1505. option = true;
  1506. } else {
  1507. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:%s, its value %s is invalid, it must be 0 or 1.", key.c_str(),
  1508. flag.c_str());
  1509. return GE_GRAPH_OPTIONS_INVALID;
  1510. }
  1511. }
  1512. return SUCCESS;
  1513. }
  1514. Status GraphManager::ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  1515. int &option) {
  1516. const int kDecimal = 10;
  1517. char *ptr = nullptr;
  1518. auto iter = options.find(key);
  1519. if (iter != options.end()) {
  1520. option = static_cast<int32_t>(std::strtol(iter->second.c_str(), &ptr, kDecimal));
  1521. if (ptr != nullptr && *ptr != '\0') {
  1522. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Key:%s, its value %s is invalid, must be int32_t type.", key.c_str(),
  1523. iter->second.c_str());
  1524. return GE_GRAPH_OPTIONS_INVALID;
  1525. }
  1526. }
  1527. return SUCCESS;
  1528. }
  1529. void GraphManager::Trim(std::string &str) {
  1530. if (!str.empty()) {
  1531. auto it = str.find_first_not_of(" ");
  1532. if (it != std::string::npos) {
  1533. str.erase(0, it);
  1534. }
  1535. it = str.find_last_not_of(" ");
  1536. if (it != std::string::npos) {
  1537. str.erase(it + 1);
  1538. }
  1539. }
  1540. }
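// Parse an option of the form "ENGINE:NUM,ENGINE:NUM" (for example a STREAM_MAX_PARALLEL_NUM value
// such as "DNN_V100:2,DNN_HCCL:3") into a map from engine name to parallel number.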
  1541. Status GraphManager::ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  1542. std::map<std::string, int> &option) {
  1543. auto iter = options.find(key);
  1544. if (iter == options.end()) {
  1545. return SUCCESS;
  1546. }
  1547. GELOGI("Start to parse %s", key.c_str());
  1548. option.clear();
  1549. std::string op_num = iter->second;
  1550. // split string by ','
  1551. std::vector<std::string> split;
  1552. std::istringstream f(op_num);
  1553. std::string str_tmp;
  1554. while (getline(f, str_tmp, ',')) {
  1555. split.push_back(str_tmp);
  1556. }
  1557. for (const std::string &engine_parallel : split) {
  1558. // split engine and num by :
  1559. size_t pos = engine_parallel.find(':');
  1560. if (pos == string::npos) {
  1561. GELOGE(GE_GRAPH_OPTIONS_INVALID,
  1562. "engine and num must be connected by :, "
  1563. "while your input is %s",
  1564. engine_parallel.c_str());
  1565. return GE_GRAPH_OPTIONS_INVALID;
  1566. }
  1567. std::string engine_name = engine_parallel.substr(0, pos);
  1568. std::string parallel_num = engine_parallel.substr(pos + 1);
  1569. Trim(engine_name);
  1570. Trim(parallel_num);
  1571. Status ret = CheckEngineName(engine_name, key, option);
  1572. if (ret != SUCCESS) {
  1573. GELOGE(GE_GRAPH_OPTIONS_INVALID, "check engine name : %s failed, ", engine_name.c_str());
  1574. return GE_GRAPH_OPTIONS_INVALID;
  1575. }
  1576. int num = 0;
  1577. ret = ParseParallelNum(parallel_num, key, num);
  1578. if (ret != SUCCESS) {
  1579. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parse parallel num failed");
  1580. return GE_GRAPH_OPTIONS_INVALID;
  1581. }
  1582. option.insert(std::make_pair(engine_name, num));
  1583. }
  1584. GELOGI("Parse %s successfully", key.c_str());
  1585. return SUCCESS;
  1586. }
  1587. Status GraphManager::CheckEngineName(const std::string &engine_name, const std::string &key,
  1588. const std::map<std::string, int> &option) {
  1589. if (engine_name.empty()) {
  1590. GELOGE(GE_GRAPH_OPTIONS_INVALID, "engine name of %s is empty", key.c_str());
  1591. return GE_GRAPH_OPTIONS_INVALID;
  1592. }
  1593. // judge whether exist in engine list
  1594. if (!GELib::GetInstance()->DNNEngineManagerObj().IsEngineRegistered(engine_name)) {
  1595. GELOGW("engine : %s is not registered in %s", engine_name.c_str(), key.c_str());
  1596. }
  1597. auto it_stream_repeat = option.find(engine_name);
  1598. if (it_stream_repeat != option.end()) {
  1599. GELOGE(GE_GRAPH_OPTIONS_INVALID, "engine : %s of %s is repeated", engine_name.c_str(), key.c_str());
  1600. return GE_GRAPH_OPTIONS_INVALID;
  1601. }
  1602. return SUCCESS;
  1603. }
  1604. Status GraphManager::ParseParallelNum(const std::string &parallel_num, const std::string &key, int &num) {
  1605. if (parallel_num.empty()) {
  1606. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parallel num of %s is empty", key.c_str());
  1607. return GE_GRAPH_OPTIONS_INVALID;
  1608. }
  1609. for (char c : parallel_num) {
  1610. if (!isdigit(c)) {
  1611. GELOGE(GE_GRAPH_OPTIONS_INVALID, "%s input is invalid ", key.c_str());
  1612. return GE_GRAPH_OPTIONS_INVALID;
  1613. }
  1614. }
  1615. try {
  1616. num = std::stoi(parallel_num);
  1617. } catch (std::invalid_argument &) {
  1618. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parallel num : %s of %s is invalid argument", parallel_num.c_str(), key.c_str());
  1619. return GE_GRAPH_OPTIONS_INVALID;
  1620. } catch (std::out_of_range &) {
  1621. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parallel num : %s of %s is out of range", parallel_num.c_str(), key.c_str());
  1622. return GE_GRAPH_OPTIONS_INVALID;
  1623. } catch (...) {
  1624. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parallel num : %s of %s is invalid argument", parallel_num.c_str(), key.c_str());
  1625. return GE_GRAPH_OPTIONS_INVALID;
  1626. }
  1627. if (num < 1) {
  1628. GELOGE(GE_GRAPH_OPTIONS_INVALID, "parallel num : %s of %s must bigger than 0", parallel_num.c_str(), key.c_str());
  1629. return GE_GRAPH_OPTIONS_INVALID;
  1630. }
  1631. return SUCCESS;
  1632. }
  1633. void GraphManager::AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node) {
  1634. std::lock_guard<std::mutex> lock(member_mutex_);
  1635. graph_map_.emplace(graph_id, graph_node);
  1636. }
  1637. void GraphManager::RemoveGraphNode(GraphId graph_id) {
  1638. std::lock_guard<std::mutex> lock(member_mutex_);
  1639. graph_map_.erase(graph_id);
  1640. }
  1641. bool GraphManager::HasGraphNode(GraphId graph_id) {
  1642. std::lock_guard<std::mutex> lock(member_mutex_);
  1643. return graph_map_.find(graph_id) != graph_map_.end();
  1644. }
  1645. Status GraphManager::GetGraphNode(const GraphId &graph_id, GraphNodePtr &out) {
  1646. std::lock_guard<std::mutex> lock(member_mutex_);
  1647. auto iter = graph_map_.find(graph_id);
  1648. if (iter == graph_map_.end()) {
  1649. out = nullptr;
  1650. GELOGE(GE_GRAPH_GRAPH_NOT_EXIST, "[GraphManager] graph not exist, graph_id= %u.", graph_id);
  1651. return GE_GRAPH_GRAPH_NOT_EXIST;
  1652. }
  1653. out = iter->second;
  1654. return SUCCESS;
  1655. }
  1656. Status GraphManager::GetVariable(const std::string &name, Tensor &val) {
  1657. GeTensorPtr ge_tensor_ptr = TensorAdapter::AsGeTensorPtr(val);
  1658. GE_CHECK_NOTNULL(ge_tensor_ptr);
  1659. return GetGraphContext()->GetVariableTensor(name, *(ge_tensor_ptr.get()));
  1660. }
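// Collect summary tensors by the output indexes recorded at compile time, strip them from the
// user-visible outputs, and push them to the registered summary callback.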
  1661. Status GraphManager::SummaryHandle(const GraphId &graph_id, std::vector<GeTensor> &outputs) {
  1662. std::vector<GeTensor> without_summary_outputs;
  1663. std::set<int> summary_output_index;
  1664. GELOGI("[GraphManager] SummaryHandle, outputsSize=%zu.", outputs.size());
  1665. const std::map<uint32_t, std::map<string, size_t>> &whole_summary_output_indexes =
  1666. GetCompilerStages(graph_id).optimizer.GetSummaryOutputIndexes();
  1667. if (whole_summary_output_indexes.find(graph_id) == whole_summary_output_indexes.end()) {
  1668. GELOGE(FAILED, "No Summary graph found in map.");
  1669. return FAILED;
  1670. }
  1671. const std::map<string, size_t> &summary_output_indexes = whole_summary_output_indexes.at(graph_id);
  1672. GELOGI("[GraphManager] SummaryHandle, summaryOutputIndexesSize=%zu.", summary_output_indexes.size());
  1673. std::map<string, Tensor> summary_results;
  1674. for (auto iter = summary_output_indexes.begin(); iter != summary_output_indexes.end(); ++iter) {
  1675. GELOGI("[GraphManager] SummaryHandle, summaryName=%s, outputIndex=%zu.", iter->first.c_str(), iter->second);
  1676. summary_results.emplace(iter->first, TensorAdapter::AsTensor(outputs.at(iter->second)));
  1677. summary_output_index.emplace(iter->second);
  1678. }
  1679. // remove summary data from outputs
  1680. if (!summary_output_index.empty()) {
  1681. for (size_t j = 0; j < outputs.size(); ++j) {
  1682. if (summary_output_index.count(j) == 0) {
  1683. without_summary_outputs.emplace_back(outputs.at(j));
  1684. }
  1685. }
  1686. outputs.swap(without_summary_outputs);
  1687. GELOGI("[GraphManager] SummaryHandle, after swap outputsSize=%zu.", outputs.size());
  1688. }
  1689. if (!summary_results.empty()) {
  1690. return PushSummaryData2ME(graph_id, summary_results);
  1691. }
  1692. return SUCCESS;
  1693. }
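// For a checkpoint graph, trace every NetOutput input back to its Variable node and push the
// variable name/tensor pairs to the registered checkpoint (save) callback.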
  1694. Status GraphManager::CheckpointHandle(const GraphId &graph_id, const ComputeGraphPtr &compute_graph,
  1695. const std::vector<GeTensor> &outputs) {
  1696. GELOGI("[GraphManager] CheckpointHandle, outputsSize=%zu.", outputs.size());
  1697. std::vector<InputOutputDescInfo> outputs_desc = graph_executor_.GetOutputsDesc();
  1698. GELOGI("[GraphManager] CheckpointHandle, outputsDescSize=%zu.", outputs_desc.size());
  1699. std::map<string, Tensor> save_results;
  1700. NodePtr netoutput = nullptr;
  1701. for (const auto &node : compute_graph->GetAllNodes()) {
  1702. if (node->GetType() == kNetOutput) {
  1703. netoutput = node;
  1704. break;
  1705. }
  1706. }
  1707. if (netoutput == nullptr) {
  1708. GELOGE(FAILED, "Netoutput is null.");
  1709. return FAILED;
  1710. }
  1711. for (const auto &in : netoutput->GetAllInDataAnchors()) {
  1712. std::string desc_name;
  1713. auto out_anchor = in->GetPeerOutAnchor();
  1714. if (out_anchor == nullptr) {
  1715. GELOGE(FAILED, "out_anchor is null.");
  1716. return FAILED;
  1717. }
  1718. ge::NodePtr peer_node = out_anchor->GetOwnerNode();
  1719. // find the variable node in graph
  1720. while (peer_node != nullptr && peer_node->GetType() != kVariable) {
  1721. if (peer_node->GetAllInDataAnchors().size() != 1) {
  1722. GELOGE(FAILED, "More than one prior nodes of peer_node %s in checkpoint Graph.", peer_node->GetName().c_str());
  1723. return FAILED;
  1724. }
  1725. auto peer_node_in = peer_node->GetAllInDataAnchors().at(0);
  1726. auto peer_node_out_anchor = peer_node_in->GetPeerOutAnchor();
  1727. if (peer_node_out_anchor != nullptr) {
  1728. peer_node = peer_node_out_anchor->GetOwnerNode();
  1729. if (peer_node->GetType() == kVariable) {
  1730. break;
  1731. }
  1732. }
  1733. }
  1734. if (peer_node == nullptr) {
  1735. GELOGE(FAILED, "No variable op found in one branch, checkpoint graph illegal.");
  1736. return FAILED;
  1737. }
  1738. desc_name = peer_node->GetName();
  1739. GELOGI("[GraphManager] CheckpointHandle, descName=%s.", desc_name.c_str());
  1740. if (in->GetIdx() >= static_cast<int>(outputs.size())) {
  1741. GELOGE(FAILED, "variable index out of range.");
  1742. return FAILED;
  1743. }
  1744. save_results.emplace(desc_name, TensorAdapter::AsTensor(outputs.at(in->GetIdx())));
  1745. }
  1746. if (!save_results.empty()) {
  1747. return PushSaveData2ME(graph_id, save_results);
  1748. }
  1749. return SUCCESS;
  1750. }
  1751. Status GraphManager::RegisterCallBackFunc(
  1752. const std::string &key,
  1753. const std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)> &callback) {
  1754. std::lock_guard<std::mutex> lock(member_mutex_);
  1755. GELOGI("[GraphManager] RegisterCallBackFunc, key=%s.", key.c_str());
  1756. me_callback_map_[key] = callback;
  1757. return SUCCESS;
  1758. }
  1759. Status GraphManager::RegisterCallBackFunc(
  1760. const std::string &key,
  1761. const std::function<Status(uint32_t, const std::map<AscendString, ge::Tensor> &)> &callback) {
  1762. std::lock_guard<std::mutex> lock(member_mutex_);
  1763. GELOGI("[GraphManager] RegisterCallBackFunc, key=%s.", key.c_str());
  1764. callback_map_[key] = callback;
  1765. return SUCCESS;
  1766. }
  1767. Status GraphManager::PushSummaryData2ME(const GraphId &graph_id,
  1768. const std::map<std::string, ge::Tensor> &summary_data) {
  1769. std::lock_guard<std::mutex> lock(member_mutex_);
  1770. GELOGI("[GraphManager] PushSummaryData2ME, dataSize=%zu.", summary_data.size());
  1771. auto itr = me_callback_map_.find(kSummary);
  1772. if (itr == me_callback_map_.end()) {
  1773. auto iter = callback_map_.find(kSummary);
  1774. if (iter != callback_map_.end()) {
  1775. std::map<AscendString, ge::Tensor> tmp_summary_data;
  1776. for (auto &data : summary_data) {
  1777. AscendString tmp(data.first.c_str());
  1778. tmp_summary_data[tmp] = data.second;
  1779. }
  1780. return iter->second(graph_id, tmp_summary_data);
  1781. }
  1782. GELOGE(FAILED, "[GraphManager] PushSummaryData2ME failed, not found summary callback.");
  1783. return FAILED;
  1784. }
  1785. return itr->second(graph_id, summary_data);
  1786. }
  1787. Status GraphManager::PushSaveData2ME(const GraphId &graph_id, const std::map<std::string, ge::Tensor> &save_data) {
  1788. std::lock_guard<std::mutex> lock(member_mutex_);
  1789. GELOGI("[GraphManager] PushSaveData2ME, dataSize=%zu.", save_data.size());
  1790. auto itr = me_callback_map_.find(kSave);
  1791. if (itr == me_callback_map_.end()) {
  1792. auto iter = callback_map_.find(kSave);
  1793. if (iter != callback_map_.end()) {
  1794. std::map<AscendString, ge::Tensor> tmp_save_data;
  1795. for (auto &data : save_data) {
  1796. AscendString tmp(data.first.c_str());
  1797. tmp_save_data[tmp] = data.second;
  1798. }
  1799. return iter->second(graph_id, tmp_save_data);
  1800. }
  1801. GELOGE(FAILED, "[GraphManager] PushSaveData2ME failed, not found checkpoint callback.");
  1802. return FAILED;
  1803. }
  1804. return itr->second(graph_id, save_data);
  1805. }
  1806. bool GraphManager::CheckNetOutputForCheckpointGraph(NodePtr &node) {
  1807. size_t in_data_anchor_size = node->GetAllInDataAnchors().size();
  1808. for (size_t i = 0; i < in_data_anchor_size; ++i) {
  1809. auto in = node->GetInDataAnchor(i);
  1810. if (in == nullptr) {
  1811. return false;
  1812. }
  1813. auto peerin = in->GetPeerOutAnchor();
  1814. GE_IF_BOOL_EXEC(peerin == nullptr, return false);
  1815. if (peerin->GetOwnerNode()->GetType() != kVariable && (!TransOpUtil::IsTransOp(peerin->GetOwnerNode()))) {
  1816. return false;
  1817. }
  1818. }
  1819. return true;
  1820. }
  1821. bool GraphManager::CheckVariableForCheckpointGraph(NodePtr &node) {
  1822. if (node->GetOpDesc()->HasAttr(kCheckPointForGetVar)) {
  1823. return false;
  1824. }
  1825. auto out = node->GetOutDataAnchor(0);
  1826. if (out == nullptr) {
  1827. GELOGE(GE_GRAPH_PARAM_NULLPTR, "out is nullptr.");
  1828. return false;
  1829. }
  1830. auto peer_out = out->GetPeerInDataAnchors();
  1831. for (size_t i = 0; i < peer_out.size(); ++i) {
  1832. if (peer_out.at(i)->GetOwnerNode()->GetType() != kNetOutput &&
  1833. (!TransOpUtil::IsTransOp(peer_out.at(i)->GetOwnerNode()))) {
  1834. return false;
  1835. }
  1836. }
  1837. return true;
  1838. }
  1839. bool GraphManager::CheckTransOpForCheckpointGraph(NodePtr &node) {
  1840. for (const auto &out_node : node->GetOutAllNodes()) {
  1841. if ((!TransOpUtil::IsTransOp(out_node)) && (out_node->GetType() != kNetOutput) && (out_node->GetType() != kSend)) {
  1842. return false;
  1843. }
  1844. }
  1845. for (const auto &in_node : node->GetInAllNodes()) {
  1846. if ((!TransOpUtil::IsTransOp(in_node)) && (in_node->GetType() != kVariable) && (in_node->GetType() != kRecv)) {
  1847. return false;
  1848. }
  1849. }
  1850. return true;
  1851. }
  1852. static inline bool CheckConstanOpForCheckpointGraph(NodePtr &node) { return node->GetOutDataNodes().empty(); }
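// A checkpoint sub graph may only contain Variable, NetOutput, Send/Recv, trans ops and Constant
// nodes without data outputs; any other node type disqualifies it.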
  1853. bool GraphManager::IsCheckpointGraph(ComputeGraphPtr &compute_graph) {
  1854. if (compute_graph == nullptr) {
  1855. GELOGE(GE_GRAPH_PARAM_NULLPTR, "[IsCheckpointGraph] computeGraph is nullptr.");
  1856. return false;
  1857. }
  1858. for (auto &node : compute_graph->GetAllNodes()) {
  1859. OpDescPtr op = node->GetOpDesc();
  1860. GE_RT_FALSE_CHECK_NOTNULL(op);
  1861. if (op->GetType() == kNetOutput) {
  1862. if (!CheckNetOutputForCheckpointGraph(node)) {
  1863. return false;
  1864. }
  1865. } else if (op->GetType() == kVariable) {
  1866. if (!CheckVariableForCheckpointGraph(node)) {
  1867. return false;
  1868. }
  1869. } else if ((TransOpUtil::IsTransOp(node))) {
  1870. if (!CheckTransOpForCheckpointGraph(node)) {
  1871. return false;
  1872. }
  1873. } else if (op->GetType() == CONSTANTOP) {
  1874. if (!CheckConstanOpForCheckpointGraph(node)) {
  1875. return false;
  1876. }
  1877. } else if (op->GetType() != kSend && op->GetType() != kRecv) {
  1878. GELOGI("this node is not allow in checkpoint sub graph, node_type: %s, node_name: %s.", op->GetType().c_str(),
  1879. op->GetName().c_str());
  1880. return false;
  1881. }
  1882. }
  1883. GELOGI("current graph %s is checkpoint sub graph.", compute_graph->GetName().c_str());
  1884. return true;
  1885. }
  1886. bool GraphManager::IsBroadCastOpData(const ge::NodePtr &var_node) {
  1887. for (auto &out_anchor : var_node->GetAllOutDataAnchors()) {
  1888. GE_RT_FALSE_CHECK_NOTNULL(out_anchor);
  1889. for (auto &in_anchor : out_anchor->GetPeerInDataAnchors()) {
  1890. GE_RT_FALSE_CHECK_NOTNULL(in_anchor);
  1891. ge::NodePtr dst_node = in_anchor->GetOwnerNode();
  1892. GE_RT_FALSE_CHECK_NOTNULL(dst_node);
  1893. if (dst_node->GetType() == HCOMBROADCAST || dst_node->GetType() == HVDCALLBACKBROADCAST) {
  1894. return true;
  1895. }
  1896. }
  1897. }
  1898. return false;
  1899. }
  1900. void GraphManager::SetAttrForHcomBroadCastOp(ge::ComputeGraphPtr &compute_graph) {
1901. // add variable attr for hccl broadcast; needs to be removed after the variable pass goes online
  1902. for (const ge::NodePtr &node : compute_graph->GetDirectNode()) {
  1903. if (node->GetOpDesc()->GetType() != ge::VARIABLE) {
  1904. continue;
  1905. }
  1906. if (IsBroadCastOpData(node)) {
  1907. AdjustBroadCastOpData(node);
  1908. }
  1909. if (IsAssignOpData(node)) {
  1910. AdjustAssignOpData(node);
  1911. }
  1912. }
  1913. }
  1914. void GraphManager::AdjustBroadCastOpData(const ge::NodePtr &var_node) {
  1915. if (!ge::AttrUtils::SetStr(var_node->GetOpDesc(), VAR_ATTR_VAR_IS_BROADCAST, "var_is_restore")) {
  1916. GELOGW("set var_is_restore failed");
  1917. }
  1918. }
  1919. bool GraphManager::IsAssignOpData(const ge::NodePtr &var_node) {
  1920. GELOGD("IsAssignOpData var_node %s", var_node->GetName().c_str());
  1921. std::map<std::string, std::set<int>> assign_ops = {{ASSIGN, {0}}};
  1922. ge::NodePtr assign_node = nullptr;
  1923. if (ConfirmUseOpAndIndexByNode(var_node, assign_ops, assign_node)) {
  1924. return true;
  1925. }
  1926. return false;
  1927. }
  1928. void GraphManager::AdjustAssignOpData(const ge::NodePtr &var_node) {
  1929. if (!ge::AttrUtils::SetStr(var_node->GetOpDesc(), VAR_ATTR_VAR_IS_RESTORE, "var_is_restore")) {
  1930. GELOGW("SetStr var_is_restore failed");
  1931. }
  1932. }
  1933. bool GraphManager::ConfirmUseOpAndIndexByAnchor(const ge::InDataAnchorPtr &in_anchor,
  1934. const map<string, std::set<int>> &confirm_ops, ge::NodePtr &use_node) {
  1935. GE_RT_FALSE_CHECK_NOTNULL(in_anchor);
  1936. ge::NodePtr dst_node = in_anchor->GetOwnerNode();
  1937. GE_RT_FALSE_CHECK_NOTNULL(dst_node);
  1938. ge::OpDescPtr dst_op_desc = dst_node->GetOpDesc();
  1939. GE_RT_FALSE_CHECK_NOTNULL(dst_op_desc);
  1940. const string &dst_type = dst_op_desc->GetType();
  1941. int input_index = in_anchor->GetIdx();
  1942. GELOGD("ConfirmUseOpAndIndex, var name %s, dst_type = %s, input index %d", dst_node->GetName().c_str(),
  1943. dst_type.c_str(), input_index);
  1944. if (confirm_ops.count(dst_type) > 0) {
  1945. if (confirm_ops.at(dst_type).count(input_index) > 0) {
  1946. use_node = dst_node;
  1947. return true;
  1948. }
  1949. }
  1950. return false;
  1951. }
  1952. bool GraphManager::ConfirmUseOpAndIndexByNode(const ge::NodePtr &var_node,
  1953. const map<string, std::set<int>> &confirm_ops, ge::NodePtr &use_node) {
  1954. GE_RT_FALSE_CHECK_NOTNULL(var_node);
  1955. for (auto &out_anchor : var_node->GetAllOutDataAnchors()) {
  1956. GE_RT_FALSE_CHECK_NOTNULL(out_anchor);
  1957. for (auto &in_anchor : out_anchor->GetPeerInDataAnchors()) {
  1958. GE_RT_FALSE_CHECK_NOTNULL(in_anchor);
  1959. if (ConfirmUseOpAndIndexByAnchor(in_anchor, confirm_ops, use_node)) {
  1960. return true;
  1961. }
  1962. }
  1963. }
  1964. return false;
  1965. }
  1966. Status GraphManager::RemoveIsolatedConstInThisGraph(ge::ComputeGraphPtr &compute_graph) {
  1967. for (ge::NodePtr &n : compute_graph->GetDirectNode()) {
  1968. if (n->GetOpDesc() == nullptr) {
  1969. continue;
  1970. }
  1971. if (n->GetOpDesc()->GetType() == CONSTANT || n->GetOpDesc()->GetType() == CONSTANTOP) {
  1972. // reset const type depend on train_flag
  1973. options_.train_graph_flag ? n->GetOpDesc()->SetType(CONSTANTOP) : n->GetOpDesc()->SetType(CONSTANT);
  1974. if (n->GetOutAllNodes().empty() && n->GetInAllNodes().empty()) {
  1975. // it is an isolated constant, just remove it
  1976. if (GraphUtils::RemoveJustNode(compute_graph, n) != GRAPH_SUCCESS) {
  1977. GELOGE(FAILED, "remove constant %s failed.", n->GetName().c_str());
  1978. return FAILED;
  1979. }
  1980. }
  1981. }
  1982. }
  1983. return SUCCESS;
  1984. }
  1985. Status GraphManager::RemoveIsolatedConst(ge::ComputeGraphPtr &compute_graph) {
  1986. GE_CHK_STATUS_RET(RemoveIsolatedConstInThisGraph(compute_graph));
  1987. for (auto &sub_graph : compute_graph->GetAllSubgraphs()) {
  1988. GE_CHK_STATUS_RET(RemoveIsolatedConstInThisGraph(sub_graph));
  1989. }
  1990. return SUCCESS;
  1991. }
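// Stage-1 optimization: graph passes run after sub graph merging (const fusion, CSE, transdata
// fusion, variable acceleration and so on), followed by node passes such as constant folding.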
  1992. Status GraphManager::OptimizeStage1(ge::ComputeGraphPtr &compute_graph) {
  1993. string options = "default";
  1994. if (GetContext().GetOption("ge.exec.variable_acc", options) != SUCCESS) {
  1995. GELOGI("get ge.exec.variable_acc failed. set default value.");
  1996. }
  1997. PassManager after_merge_passes;
  1998. GE_CHK_STATUS_RET(
  1999. after_merge_passes.AddPass("OptimizeStage1_1::MergeInputMemcpyPass", new (std::nothrow) MergeInputMemcpyPass));
  2000. GE_CHK_STATUS_RET(
  2001. after_merge_passes.AddPass("OptimizeStage1_1::SwitchDataEdgesBypass", new (std::nothrow) SwitchDataEdgesBypass));
  2002. GE_CHK_STATUS_RET(
  2003. after_merge_passes.AddPass("OptimizeStage1_1::ConstantFuseSamePass", new (std::nothrow) ConstantFuseSamePass));
2004. /*
2005.  * Do CSE before FuseDataNodesWithCommonInputPass to resolve the following scene in bertlarge:
2006.  *            const
2007.  *           /  |  \
2008.  *       cast1 cast2 cast3
2009.  *           \  |  /
2010.  *            case
2011.  * the node `const` is the fused const node after ConstantFuseSamePass,
2012.  * and the nodes `cast1`, `cast2` and `cast3` will be fused by CSE.
2013.  * In order to eliminate hard code in FuseDataNodesWithCommonInputPass,
2014.  * we do CSE before FuseDataNodesWithCommonInputPass.
2015.  * But this is a temporary solution; it will be deleted after the pass is changed from a graph pass to a node pass.
2016.  */
  2017. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::CSEBeforeFuseDataNodesWithCommonInputPass",
  2018. new (std::nothrow) CommonSubexpressionEliminationPass));
  2019. // FuseDataNodesWithCommonInputPass: fuse same data with common input in same graph
  2020. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::FuseDataNodesWithCommonInputPass",
  2021. new (std::nothrow) FuseDataNodesWithCommonInputPass));
  2022. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::CommonSubexpressionEliminationPass",
  2023. new (std::nothrow) CommonSubexpressionEliminationPass));
  2024. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::PermutePass", new (std::nothrow) PermutePass))
2025. /*
2026.  * The SameTransdataBreadthFusionPass should be called before VariableOpPass, because of the following scene:
2027.  *          node3
2028.  *            |
2029.  *       transdata1    node2
2030.  *            |          |
2031.  *          cast1    transdata2
2032.  *               \      /
2033.  *                 var
2034.  * the node `transdata1` should be moved to the front of the node `cast1`,
2035.  * to ensure that `transdata1` and `transdata2` can be fused with `var`.
2036.  * But it is a temp solution, because the `SameTransdataBreadthFusionPass`
2037.  * can only move `TransData` but not `Cast` nodes.
2038.  * So if we exchange Cast and TransData, the fusion mechanism will fail.
2039.  */
  2040. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::SameTransdataBreadthFusionPass",
  2041. new (std::nothrow) SameTransdataBreadthFusionPass))
  2042. GE_IF_BOOL_EXEC(options == "default" || options == "1", GELOGI("turn on variable accelerator");
  2043. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::VariableOpPass",
  2044. new (std::nothrow) VariableOpPass(&var_acc_ctrl_))))
  2045. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::TransOpWithoutReshapeFusionPass",
  2046. new (std::nothrow) TransOpWithoutReshapeFusionPass))
  2047. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage1_1::TransOpBreadthFusionPass",
  2048. new (std::nothrow) TransOpBreadthFusionPass))
  2049. GE_TIMESTAMP_START(after_merge_passes);
  2050. auto ret = after_merge_passes.Run(compute_graph);
  2051. GE_TIMESTAMP_END(after_merge_passes, "GraphManager::OptimizeStage1_1");
  2052. if (ret != SUCCESS && ret != NOT_CHANGED) {
  2053. GELOGE(ret, "Run passes when OptimizeStage1_1 failed, ret:%u.", ret);
  2054. return ret;
  2055. }
  2056. GE_DUMP(compute_graph, "OptimizeStage1_1");
  2057. NamesToPass names_to_passes;
  2058. TransOpNearbyAllreduceFusionPass trans_op_nearby_allreduce_fusion_pass;
  2059. ReshapeRemovePass reshape_remove_pass;
  2060. ConstantFoldingPass constant_folding_pass;
  2061. DimensionAdjustPass dimension_adjust_pass;
  2062. EnterPass enter_pass;
  2063. AddNPass addn_pass;
  2064. SwitchDeadBranchElimination switch_dead_branch_elimination;
  2065. SwitchLogicRemovePass switch_logic_remove_pass;
  2066. MergePass merge_pass;
  2067. CastRemovePass cast_remove_pass;
  2068. TransposeTransDataPass transpose_transdata_pass;
  2069. TransOpSymmetryEliminationPass symmetry_elimination_pass;
  2070. DimensionComputePass dimension_compute_pass;
  2071. UselessControlOutRemovePass useless_control_out_remove_pass;
  2072. names_to_passes.emplace_back("EnterPass", &enter_pass);
  2073. names_to_passes.emplace_back("AddNPass", &addn_pass);
  2074. names_to_passes.emplace_back("SwitchDeadBranchElimination", &switch_dead_branch_elimination);
  2075. names_to_passes.emplace_back("SwitchLogicRemovePass", &switch_logic_remove_pass);
  2076. names_to_passes.emplace_back("MergePass", &merge_pass);
  2077. names_to_passes.emplace_back("CastRemovePass", &cast_remove_pass);
  2078. names_to_passes.emplace_back("TransposeTransDataPass", &transpose_transdata_pass);
  2079. names_to_passes.emplace_back("ReshapeRemovePass", &reshape_remove_pass);
  2080. names_to_passes.emplace_back("TransOpSymmetryEliminationPass", &symmetry_elimination_pass);
  2081. names_to_passes.emplace_back("TransOpNearbyAllreduceFusionPass", &trans_op_nearby_allreduce_fusion_pass);
  2082. names_to_passes.emplace_back("DimensionComputePass", &dimension_compute_pass);
  2083. names_to_passes.emplace_back("ConstantFoldingPass", &constant_folding_pass);
  2084. names_to_passes.emplace_back("DimensionAdjustPass", &dimension_adjust_pass);
  2085. names_to_passes.emplace_back("UselessControlOutRemovePass", &useless_control_out_remove_pass);
  2086. GE_TIMESTAMP_START(names_to_passes);
  2087. ret = GEPass(compute_graph).Run(names_to_passes);
  2088. GE_TIMESTAMP_END(names_to_passes, "GraphManager::OptimizeStage1_2");
  2089. if (ret != SUCCESS) {
  2090. GELOGE(ret, "Run passes when OptimizeStage1_2 failed, ret:%u.", ret);
  2091. return ret;
  2092. }
2093. // Calculate the Op/FE constant folding cost
  2094. uint64_t op_constant_folding_cost = 0;
  2095. for (auto &it : constant_folding_pass.GetOpConstantFoldingPerfStatistic()) {
  2096. op_constant_folding_cost += it.second.second;
  2097. GELOGI("The time cost of %s constant folding is [%lu] micro second, calls is %lu.",
  2098. it.first.c_str(), it.second.second, it.second.first);
  2099. }
  2100. GEEVENT("[GEPERFTRACE] The time cost of extern constant folding is [%lu] micro second.", op_constant_folding_cost);
  2101. for (auto &it : constant_folding_pass.GetGeConstantFoldingPerfStatistic()) {
  2102. op_constant_folding_cost += it.second.second;
  2103. GELOGI("The time cost of %s constant folding is [%lu] micro second, calls is %lu.",
  2104. it.first.c_str(), it.second.second, it.second.first);
  2105. }
  2106. GE_DUMP(compute_graph, "OptimizeStage1_2");
  2107. PassManager graph_pass;
2108. // the prune pass should run between SwitchPass and SwitchToStreamSwitchPass
  2109. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::Migration", new (std::nothrow) SubgraphConstMigrationPass));
  2110. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::ArgsClean", new (std::nothrow) UnusedArgsCleanPass));
  2111. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::PrunePass", new (std::nothrow) PrunePass))
  2112. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::NextIterationPass", new (std::nothrow) NextIterationPass))
  2113. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::ControlTriggerPass", new (std::nothrow) ControlTriggerPass))
  2114. GE_CHK_STATUS_RET(
  2115. graph_pass.AddPass("OptimizeStage1_3::MergeToStreamMergePass", new (std::nothrow) MergeToStreamMergePass))
  2116. GE_CHK_STATUS_RET(
  2117. graph_pass.AddPass("OptimizeStage1_3::SwitchToStreamSwitchPass", new (std::nothrow) SwitchToStreamSwitchPass))
  2118. GE_CHK_STATUS_RET(
  2119. graph_pass.AddPass("OptimizeStage1_3::AttachStreamLabelPass", new (std::nothrow) AttachStreamLabelPass))
  2120. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::MultiBatchPass", new (std::nothrow) MultiBatchPass(true)))
  2121. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::IteratorOpPass", new (std::nothrow) IteratorOpPass))
  2122. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::VariableRefUselessControlOutDeletePass",
  2123. new (std::nothrow) VariableRefUselessControlOutDeletePass))
  2124. GE_CHK_STATUS_RET(graph_pass.AddPass("OptimizeStage1_3::ReshapeRecoveryPass", new (std::nothrow) ReshapeRecoveryPass))
  2125. GE_CHK_STATUS_RET(
  2126. graph_pass.AddPass("OptimizeStage1_3::RemoveSameConstPass", new (std::nothrow) RemoveSameConstPass))
  2127. if (options_.train_graph_flag) {
2128. // Priority: the GlobalStepInsertPass should run before the graph partitioner.
2129. // Reason: make sure that the variable "global_step" can be partitioned into a known subgraph and allocated memory.
  2130. GE_CHK_STATUS_RET(
  2131. graph_pass.AddPass("OptimizeStage1_3::GlobalStepInsertPass", new (std::nothrow) GlobalStepInsertPass))
  2132. }
  2133. GE_TIMESTAMP_START(graph_pass);
  2134. ret = graph_pass.Run(compute_graph);
  2135. GE_TIMESTAMP_END(graph_pass, "GraphManager::OptimizeStage1_3");
  2136. if (ret != SUCCESS && ret != NOT_CHANGED) {
  2137. GELOGE(ret, "Run passes when OptimizeStage1_3 failed, ret:%u.", ret);
  2138. return ret;
  2139. }
  2140. NamesToPass node_pass;
  2141. GE_TIMESTAMP_START(node_pass);
  2142. IdentityPass identity_force_pass(false); // after SwitchToStreamSwitchPass
  2143. node_pass.emplace_back("IdentityPass", &identity_force_pass);
  2144. ret = GEPass(compute_graph).Run(node_pass);
  2145. GE_TIMESTAMP_END(node_pass, "GraphPrepare::node_pass");
  2146. if (ret != SUCCESS) {
  2147. GELOGE(ret, "Run identity remove pass for preprocess failed, ret:%u.", ret);
  2148. return ret;
  2149. }
  2150. return SUCCESS;
  2151. }
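// A minimal sketch (hypothetical helper, not called anywhere) of the two pass-driving patterns used
// by OptimizeStage1 above and OptimizeStage2 below: whole-graph passes are heap-allocated and handed
// to a PassManager, while per-node passes are stack-allocated, registered in a NamesToPass list and
// driven by GEPass. It only reuses types and macros already included by this file.
static Status RunExamplePasses(ComputeGraphPtr &compute_graph) {
  // Graph-level passes, mirroring the PassManager usage above.
  PassManager graph_passes;
  GE_CHK_STATUS_RET(graph_passes.AddPass("Example::PrunePass", new (std::nothrow) PrunePass));
  Status ret = graph_passes.Run(compute_graph);
  if (ret != SUCCESS && ret != NOT_CHANGED) {  // NOT_CHANGED is not treated as an error for graph passes
    return ret;
  }
  // Node-level passes, mirroring the GEPass usage above.
  NamesToPass names_to_passes;
  ConstantFoldingPass constant_folding_pass;
  ReshapeRemovePass reshape_remove_pass;
  names_to_passes.emplace_back("ConstantFoldingPass", &constant_folding_pass);
  names_to_passes.emplace_back("ReshapeRemovePass", &reshape_remove_pass);
  return GEPass(compute_graph).Run(names_to_passes);
}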
  2152. Status GraphManager::OptimizeStage2(ge::ComputeGraphPtr &compute_graph) {
  2153. GELOGD("Start optimize after merge sub graph.");
  2154. PassManager after_merge_passes;
  2155. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage2::AfterMergePasses::LinkGenMaskNodesPass",
  2156. new (std::nothrow)
  2157. LinkGenMaskNodesPass(options_.stream_max_parallel_num)));
  2158. GE_CHK_STATUS_RET(after_merge_passes.AddPass("OptimizeStage2::HcclContinuousMemcpyPass",
  2159. new (std::nothrow) HcclContinuousMemcpyPass));
  2160. GE_TIMESTAMP_START(after_merge_passes);
  2161. auto ret = after_merge_passes.Run(compute_graph);
  2162. GE_TIMESTAMP_END(after_merge_passes, "OptimizeStage2::AfterMergePasses");
  2163. if (ret != SUCCESS && ret != NOT_CHANGED) {
  2164. GELOGE(ret, "Run passes after merge sub graph failed, ret:%d.", ret);
  2165. return ret;
  2166. }
  2167. SetAttrForHcomBroadCastOp(compute_graph);
  2168. NamesToPass names_to_passes;
  2169. ConstantFoldingPass constant_folding_pass;
  2170. ReshapeRemovePass reshape_remove_pass;
  2171. CondRemovePass condition_remove_pass;
  2172. BitcastPass bitcast_pass;
  2173. AssignRemovePass assign_remove_pass;
  2174. InplaceSupportCheckPass inplace_support_check_pass;
  2175. names_to_passes.emplace_back("ConstantFoldingPass", &constant_folding_pass);
  2176. names_to_passes.emplace_back("ReshapeRemovePass", &reshape_remove_pass);
  2177. names_to_passes.emplace_back("CondRemovePass", &condition_remove_pass);
  2178. names_to_passes.emplace_back("BitcastPass", &bitcast_pass);
  2179. if (GetContext().GetHostExecFlag()) {
  2180. names_to_passes.emplace_back("AssignRemovePass", &assign_remove_pass);
  2181. names_to_passes.emplace_back("InplaceSupportCheckPass", &inplace_support_check_pass);
  2182. }
  2183. GE_TIMESTAMP_START(names_to_passes);
  2184. ret = GEPass(compute_graph).Run(names_to_passes);
  2185. GE_TIMESTAMP_END(names_to_passes, "OptimizeStage2::MergedGraphNameToPasses");
  2186. if (ret != SUCCESS) {
  2187. GELOGE(ret, "Run ge_passes optimize for OptimizeAfterMergeSubGraph failed, ret:%d.", ret);
  2188. return ret;
  2189. }
  2190. ret = RemoveIsolatedConst(compute_graph);
  2191. if (ret != SUCCESS) {
  2192. GELOGE(ret, "Remove isolated Constant failed, ret:%d.", ret);
  2193. return ret;
  2194. }
  2195. PassManager pass_for_control_attr_optimize;
  2196. if (options_.train_graph_flag) {
  2197. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::ControlAttrOptimize::FlowCtrlPass",
  2198. new (std::nothrow) FlowCtrlPass))
  2199. }
  2200. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::ControlAttrOptimize::MultiBatchPass",
  2201. new (std::nothrow) MultiBatchPass))
  2202. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::AfterMergePasses::RefIdentityDeleteOpPass",
  2203. new (std::nothrow) RefIdentityDeleteOpPass))
2204. // The value of the attr is the name of the original variable that the ref-variable refers to.
2205. // The attr will be used when allocating memory:
2206. // a node marked with the attr outputs to that variable instead of to newly allocated memory.
2207. // Therefore, the ComputeGraph should not delete nodes after `VariableRefDeleteOpPass`,
2208. // to prevent unexpected deletion of nodes marked with the attr.
  2209. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::AfterMergePasses::VariableRefDeleteOpPass",
  2210. new (std::nothrow) VariableRefDeleteOpPass))
  2211. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::ControlAttrOptimize::CompileNodesPass",
  2212. new (std::nothrow) CompileNodesPass))
  2213. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass(
  2214. "OptimizeStage2::AfterMergePasses::MarkGraphUnknownStatusPass", new(std::nothrow) MarkGraphUnknownStatusPass))
  2215. GE_CHK_STATUS_RET(
  2216. pass_for_control_attr_optimize.AddPass("OptimizeStage2::AfterMergePasses::InputOutputConnectionIdentifyPass",
  2217. new (std::nothrow) InputOutputConnectionIdentifyPass))
2218. // When the input node to be cleared is after a `Data` node, the atomic-clean node should not be inserted.
2219. // So the ComputeGraph should not delete nodes after `AtomicAddrCleanPass`,
2220. // to prevent unexpected deletion of nodes after a `Data` node.
  2221. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::AfterMergePasses::AtomicAddrCleanPass",
  2222. new (std::nothrow) AtomicAddrCleanPass))
  2223. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::AfterMergePasses::"
  2224. "EndOfSequenceAddControlPass",
  2225. new (std::nothrow) EndOfSequenceAddControlPass))
2226. // 'SubgraphPass' solves memory_assign_conflicts by inserting MemcpyAsync nodes, which depends on multiple attrs
2227. // and the graph structure. Passes after 'SubgraphPass' MUST NOT remove MemcpyAsync/Identity nodes in subgraphs.
  2228. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::ControlAttrOptimize::SubgraphPass",
  2229. new (std::nothrow) SubgraphPass))
2230. // 'AttachStreamLabelPass' modifies attrs without changing the structure of compute_graph.
2231. // All passes after 'AttachStreamLabelPass' MUST mark stream_label on any new nodes themselves.
  2232. GE_CHK_STATUS_RET(pass_for_control_attr_optimize.AddPass("OptimizeStage2::ControlAttrOptimize::AttachStreamLabelPass",
  2233. new (std::nothrow) AttachStreamLabelPass))
  2234. GE_TIMESTAMP_START(pass_for_control_attr_optimize);
  2235. ret = pass_for_control_attr_optimize.Run(compute_graph);
  2236. GE_TIMESTAMP_END(pass_for_control_attr_optimize, "OptimizeStage2::ControlAttrOptimize");
  2237. if (ret != SUCCESS && ret != NOT_CHANGED) {
  2238. GELOGE(ret, "Run passes when optimize stage 2 failed");
  2239. return ret;
  2240. }
  2241. // Assign functional op labels.
  2242. GE_TIMESTAMP_START(AssignFunctionalLabels);
  2243. LabelAllocator label_allocator(compute_graph);
  2244. GE_CHK_STATUS_RET(label_allocator.AssignFunctionalLabels(), "Assign label failed.");
  2245. GE_TIMESTAMP_END(AssignFunctionalLabels, "ModelBuilder::AssignFunctionalLabels");
  2246. // Add memcpy addr asynchronous node.
  2247. GE_TIMESTAMP_START(AddMemcpyAddrAsyncNode);
  2248. MemcpyAddrAsyncPass memcpy_addr;
  2249. GE_CHK_STATUS_RET(memcpy_addr.Run(compute_graph), "Add memcpy_addr_async node failed.");
  2250. GE_TIMESTAMP_END(AddMemcpyAddrAsyncNode, "MemcpyAddrAsyncPass::Run.");
2251. // After the While subgraphs are handled, mark the RW type of all nodes
  2252. auto result = GetCompilerStages(compute_graph->GetGraphID()).optimizer.HandleMemoryRWConflict(compute_graph);
  2253. if (result != SUCCESS) {
  2254. GELOGW(
  2255. "Mark node rw type failed. It will take some effect on memory_assign_conflicts handling."
  2256. "Please pay attention to it.");
  2257. }
  2258. ChangeConstTypeWhenTraining(compute_graph);
  2259. GELOGI("End optimize after merge sub graph.");
  2260. return SUCCESS;
  2261. }
  2262. void GraphManager::ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph) {
2263. // The constant type for training is CONSTANTOP and CONSTANT for inference; they will be unified in the future.
  2264. if (options_.train_graph_flag) {
  2265. for (NodePtr &n : compute_graph->GetAllNodes()) {
  2266. // This can ensure that n is not a null pointer
  2267. if (n->GetOpDesc()->GetType() == CONSTANT) {
  2268. n->GetOpDesc()->SetType(CONSTANTOP);
  2269. }
  2270. }
  2271. }
  2272. }
  2273. Status GraphManager::LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node) {
  2274. GELOGI("[LoadGraphAsync] run_graph_flag[%d], graph_id[%u]", options_.run_graph_flag, graph_node->GetGraphId());
  2275. if (options_.run_graph_flag && ge_root_model != nullptr) {
  2276. ge_root_model->SetTrainFlag(GetTrainFlag());
2277. // run the graph synchronously with the model
  2278. ModelIdInfo model_id_info;
  2279. bool is_unknown_shape = false;
  2280. GE_CHK_STATUS_RET(ge_root_model->CheckIsUnknownShape(is_unknown_shape));
  2281. if (!is_unknown_shape) {
  2282. if (getenv(kEnvGeuseStaticMemory) != nullptr) {
  2283. GELOGI("[LoadGraphAsync] GE_USE_STATIC_MEMORY is seted.");
  2284. } else {
  2285. auto root_graph = ge_root_model->GetRootGraph();
  2286. GE_CHECK_NOTNULL(root_graph);
  2287. auto name_to_model = ge_root_model->GetSubgraphInstanceNameToModel();
  2288. GeModelPtr ge_model = name_to_model[root_graph->GetName()];
  2289. GE_CHK_STATUS_RET(CheckAndReleaseMemory(ge_model, graph_node));
  2290. }
  2291. }
  2292. GE_TIMESTAMP_START(LoadGraph);
  2293. auto listener = MakeShared<RunAsyncListener>();
  2294. GE_CHECK_NOTNULL(listener);
  2295. Status ret = GraphLoader::LoadModelOnline(model_id_info.model_id, ge_root_model, listener);
  2296. GE_TIMESTAMP_EVENT_END(LoadGraph, "GraphManager::LoadGraphAsync");
  2297. if (ret != SUCCESS) {
  2298. GELOGE(ret, "[LoadGraphAsync] LoadGraphAsync Failed");
  2299. graph_node->SetRunFlag(false);
  2300. return ret;
  2301. }
  2302. graph_node->SetLoadFlag(true);
  2303. ge_root_model->SetModelId(model_id_info.model_id);
  2304. graph_node->SetGeRootModel(ge_root_model);
  2305. }
  2306. return SUCCESS;
  2307. }
  2308. void GraphManager::ReleaseMemory(const GeModelPtr &ge_model, GraphNodePtr &graph_node,
  2309. const std::vector<uint32_t> &model_ids, uint32_t graph_id, uint64_t session_id) {
  2310. rtError_t rt_ret = rtSetDevice(GetContext().DeviceId());
  2311. if (rt_ret != RT_ERROR_NONE) {
  2312. REPORT_CALL_ERROR("E19999", "Call rtSetDevice failed, device_id:%u, when GraphManager %s",
  2313. GetContext().DeviceId(), __FUNCTION__);
  2314. GELOGE(RT_FAILED, "[GraphManager:] rtSetDevice failed, graphId=%u.", graph_id);
  2315. return;
  2316. }
  2317. for (auto model_id : model_ids) {
  2318. uint64_t max_memory_size = 0;
  2319. Status result = GraphLoader::GetMaxUsedMemory(model_id, max_memory_size);
  2320. if (result != SUCCESS) {
  2321. continue;
  2322. }
  2323. GELOGI("CheckAndReleaseMemory try to UnloadGraph[%u], model[%u] which MaxUsedMemory[%lu].", graph_id, model_id,
  2324. max_memory_size);
  2325. if (model_ids.size() > 1) {
  2326. result = ge_model->GetSessionId(model_id, session_id);
  2327. if (result != SUCCESS) {
  2328. GELOGW("[GraphManager:] get session failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
  2329. graph_id);
  2330. continue;
  2331. }
  2332. }
  2333. result = GraphLoader::DestroyAicpuKernel(session_id, model_id, 0);
  2334. if (result != SUCCESS) {
  2335. GELOGW("[GraphManager:] destroy aicpu kernel failed when dynamic memory, modelId=%u, graphId=%u.", model_id,
  2336. graph_id);
  2337. }
  2338. result = GraphLoader::UnloadModel(model_id);
  2339. if (result != SUCCESS) {
  2340. GELOGW("[GraphManager:] unload model failed, modelId=%u, graphId=%u.", model_id, graph_id);
  2341. }
  2342. GELOGI("CheckAndReleaseMemory UnloadGraph[%u], model[%u] success.", graph_id, model_id);
  2343. }
  2344. graph_node->SetLoadFlag(false);
  2345. rt_ret = rtDeviceReset(GetContext().DeviceId());
  2346. if (rt_ret != RT_ERROR_NONE) {
  2347. REPORT_CALL_ERROR("E19999", "Call rtDeviceReset failed, device_id:%u, when GraphManager %s",
  2348. GetContext().DeviceId(), __FUNCTION__);
  2349. GELOGE(RT_FAILED, "[GraphManager:] rtDeviceReset failed, graphId=%u.", graph_id);
  2350. return;
  2351. }
  2352. }
  2353. Status GraphManager::CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node) {
  2354. GELOGI("CheckAndReleaseMemory graph_id[%u]", graph_node->GetGraphId());
  2355. int64_t value = 0;
  2356. bool ret = ge::AttrUtils::GetInt(ge_model, ATTR_MODEL_MEMORY_SIZE, value);
  2357. int64_t memory_size = ret ? value : 0;
  2358. ret = ge::AttrUtils::GetInt(ge_model, ATTR_MODEL_WEIGHT_SIZE, value);
  2359. int64_t weight_size = ret ? value : 0;
  2360. ret = ge::AttrUtils::GetInt(ge_model, MODEL_ATTR_SESSION_ID, value);
  2361. uint64_t session_id = ret ? value : 0;
  2362. int64_t free_memory = 0;
  2363. Status result = GraphLoader::GetMemoryInfo(free_memory);
  2364. if (result != SUCCESS) {
  2365. return result;
  2366. }
  2367. GELOGI(
  2368. "CheckAndReleaseMemory Graph[%u] need memory_size[%ld], weight_size[%ld],"
  2369. " Device[%u] free_memory_size[%ld]",
  2370. graph_node->GetGraphId(), memory_size, weight_size, GetContext().DeviceId(), free_memory);
  2371. if (ge::CheckInt64AddOverflow(memory_size, weight_size) != SUCCESS) {
  2372. GELOGE(INTERNAL_ERROR, "The sum of Memory size and weight size exceeds INT64_MAX");
  2373. return INTERNAL_ERROR;
  2374. }
  2375. if (free_memory >= (memory_size + weight_size)) {
  2376. return SUCCESS;
  2377. }
  2378. std::lock_guard<std::mutex> lock(unload_model_mutex_);
  2379. std::map<GraphId, GraphNodePtr> graph_map;
  2380. {
  2381. std::lock_guard<std::mutex> lock(member_mutex_);
  2382. graph_map = graph_map_;
  2383. }
  2384. for (auto &it : graph_map) {
  2385. auto graph_id = it.second->GetGraphId();
  2386. auto model = it.second->GetGeRootModel();
  2387. if (model == nullptr) {
  2388. continue;
  2389. }
  2390. auto model_id = model->GetModelId();
  2391. auto model_ids = model->GetAllModelId();
2392. // unknown-shape models are not unloaded, so do not release their memory
  2393. bool is_unknown_shape = false;
  2394. GE_CHK_STATUS_RET(model->CheckIsUnknownShape(is_unknown_shape));
  2395. if (is_unknown_shape) {
  2396. GELOGD("model_id[%u] graph_id[%u] is unknown model, not release memory", model_id, graph_id);
  2397. continue;
  2398. }
2399. // not loaded, no need to unload
  2400. if (!it.second->GetLoadFlag()) {
  2401. GELOGI("CheckAndReleaseMemory graph[%u] has not been loaded.", graph_id);
  2402. continue;
  2403. }
  2404. ReleaseMemory(ge_model, it.second, model_ids, graph_id, session_id);
  2405. }
  2406. return SUCCESS;
  2407. }
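// A minimal sketch (hypothetical helper, not part of GE's API) of the read-attr-with-default pattern
// used by CheckAndReleaseMemory above; it relies only on the AttrUtils and GeModelPtr declarations
// that this file already includes.
static int64_t GetModelAttrOrDefault(const GeModelPtr &ge_model, const std::string &attr_name,
                                     int64_t default_value) {
  int64_t value = 0;
  // AttrUtils::GetInt returns false when the attr is missing, in which case the default is used.
  return ge::AttrUtils::GetInt(ge_model, attr_name, value) ? value : default_value;
}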
  2408. Status GraphManager::ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id,
  2409. const SubGraphInfoPtr &sub_graph_info_ptr,
  2410. const std::string &root_graph_name,
  2411. uint64_t session_id,
  2412. const struct ErrorMessage::Context &error_context,
  2413. const GEThreadLocalContext &ge_context) {
  2414. if (sub_graph_info_ptr != nullptr && graph_manager != nullptr) {
  2415. ErrorManager::GetInstance().SetErrorContext(error_context);
  2416. GetContext().SetSessionId(session_id);
  2417. GetThreadLocalContext() = ge_context;
  2418. graph_manager->UpdateLocalOmgContext(root_graph_id);
  2419. ComputeGraphPtr compute_graph_tmp = sub_graph_info_ptr->GetSubGraph();
  2420. const std::string &engine_name = sub_graph_info_ptr->GetEngineName();
  2421. GELOGD("ProcessSubGraphWithMultiThreads start, graph name is %s, engine_name is %s, thread id is %lu",
  2422. compute_graph_tmp != nullptr ? compute_graph_tmp->GetName().c_str() : "", engine_name.c_str(),
  2423. pthread_self());
  2424. GE_DUMP(compute_graph_tmp, "OptimizeSubGraphBefore");
  2425. GE_CHECK_NOTNULL(compute_graph_tmp);
  2426. if (!AttrUtils::SetInt(*compute_graph_tmp, ATTR_NAME_ROOT_GRAPH_ID, root_graph_id)) {
  2427. GELOGE(FAILED, "Failed to set attr ATTR_NAME_ROOT_GRAPH_ID for subgraph, graph_id: %u.", root_graph_id);
  2428. return FAILED;
  2429. }
  2430. if (!AttrUtils::SetStr(*compute_graph_tmp, ATTR_NAME_ROOT_GRAPH_NAME, root_graph_name)) {
2431. GELOGE(FAILED, "Failed to set attr ATTR_NAME_ROOT_GRAPH_NAME for subgraph, root_graph_name: %s.",
2432. root_graph_name.c_str());
  2433. return FAILED;
  2434. }
  2435. compute_graph_tmp->SetSessionID(session_id);
  2436. Status ret = graph_manager->GetCompilerStages(root_graph_id).optimizer.OptimizeSubGraph(compute_graph_tmp,
  2437. engine_name);
  2438. if (ret != SUCCESS) {
  2439. GELOGE(ret, "SubGraph optimize Failed %s", engine_name.c_str());
  2440. return ret;
  2441. } else {
  2442. GELOGD("SubGraph optimize success %s", engine_name.c_str());
  2443. }
  2444. GE_DUMP(compute_graph_tmp, "OptimizeSubGraphAfter");
  2445. sub_graph_info_ptr->SetSubGraph(compute_graph_tmp);
  2446. GELOGD("ProcessSubGraphWithMultiThreads end, graph name is %s, engine_name is %s, thread id is %lu",
  2447. compute_graph_tmp != nullptr ? compute_graph_tmp->GetName().c_str() : "", engine_name.c_str(),
  2448. pthread_self());
  2449. } else {
  2450. GELOGE(FAILED, "graph_manager or sub_graph_info_ptr is nullptr");
  2451. return FAILED;
  2452. }
  2453. return SUCCESS;
  2454. }
  2455. // run graph async on session
  2456. Status GraphManager::RunGraphAsync(const GraphId &graph_id, const std::vector<ge::InputTensorInfo> &inputs,
  2457. uint64_t session_id, RunAsyncCallback callback) {
  2458. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelExecute, ErrorMessage::kModelExecute);
  2459. GELOGI("[GraphManager] Start to run graph async, graph_id=%u, inputsSize=%zu.", graph_id, inputs.size());
  2460. bool ret = prerun_args_q_.Push(PreRunArgs({graph_id, inputs, session_id,
  2461. ErrorManager::GetInstance().GetErrorContext(),
  2462. GetThreadLocalContext(), callback}));
  2463. if (!ret) {
  2464. GELOGE(FAILED, "[GraphManager] Run graph async failed, graph_id=%u.", graph_id);
  2465. return FAILED;
  2466. }
  2467. GELOGI("[GraphManager] Run graph async success, graph_id=%u.", graph_id);
  2468. return SUCCESS;
  2469. }
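// RunGraphAsync above only enqueues a PreRunArgs item: PreRunThread pops it, builds the graph if
// needed and pushes a RunArgs item, and RunThread finally loads and executes the model and invokes
// the callback. A usage sketch from the caller's side (hypothetical; the exact RunAsyncCallback
// signature and the PrepareInputs helper are assumptions, not GE API):
//
//   std::vector<ge::InputTensorInfo> inputs = PrepareInputs();
//   Status status = graph_manager.RunGraphAsync(graph_id, inputs, session_id,
//       [](Status ret, std::vector<ge::OutputTensorInfo> &outputs) {
//         // consume outputs once execution has finished, or handle ret on failure
//       });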
  2470. void GraphManager::AddModelCacheHelperToMap(const GraphId &graph_id, uint64_t session_id,
  2471. ComputeGraphPtr &compute_graph) {
  2472. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  2473. if (instance_ptr != nullptr && instance_ptr->IsIncreBuild()) {
  2474. std::lock_guard<std::mutex> lock(member_mutex_);
  2475. auto iter = cache_helper_map_.find(graph_id);
  2476. if (iter == cache_helper_map_.end()) {
  2477. ModelCacheHelperPtr cache_helper = MakeShared<ge::ModelCacheHelper>(session_id, graph_id, compute_graph);
  2478. if (cache_helper != nullptr) {
  2479. cache_helper_map_.emplace(std::make_pair(graph_id, cache_helper));
  2480. } else {
  2481. GELOGW("Cache helper make shared failed, graph_id = %u.", graph_id);
  2482. }
  2483. }
  2484. }
  2485. }
  2486. ModelCacheHelperPtr GraphManager::FindModelCacheHelper(GraphId graph_id) {
  2487. std::lock_guard<std::mutex> lock(member_mutex_);
  2488. auto iter = cache_helper_map_.find(graph_id);
  2489. if (iter != cache_helper_map_.end()) {
  2490. return iter->second;
  2491. }
  2492. return nullptr;
  2493. }
  2494. Status GraphManager::IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_model) {
  2495. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  2496. if (instance_ptr == nullptr || !instance_ptr->IsIncreBuild()) {
  2497. return FAILED;
  2498. }
  2499. const uint32_t graph_id = graph_node->GetGraphId();
  2500. ModelCacheHelperPtr cache_helper = FindModelCacheHelper(graph_id);
  2501. if (cache_helper == nullptr) {
  2502. GELOGW("Can not find ModelCacheHelper of graph[%u]", graph_id);
  2503. return FAILED;
  2504. }
  2505. if (cache_helper->IsModelCacheHit()) {
  2506. GEEVENT("Model cache hit.");
  2507. Status ret = LoadFromCache(graph_node, cache_helper, ge_model);
  2508. if (ret == SUCCESS) {
  2509. return SUCCESS;
  2510. } else {
  2511. GELOGW("Error occurred when load from cache, abandon.");
  2512. }
  2513. } else {
  2514. GEEVENT("Model cache miss.");
  2515. }
  2516. if (SaveCacheBeforeBuild(graph_node->GetGraphId(), cache_helper) != SUCCESS) {
  2517. GELOGW("Error occurred when save cache.");
  2518. }
  2519. return FAILED;
  2520. }
  2521. void GraphManager::ConstructGeInput(const vector<InputTensorInfo> &inputs, vector<GeTensor> &ge_inputs) {
  2522. for (auto const &input : inputs) {
  2523. GeTensorDesc input_tensor_desc(GeShape(input.dims));
  2524. input_tensor_desc.SetDataType(static_cast<ge::DataType>(input.data_type));
  2525. ge_inputs.emplace_back(input_tensor_desc);
  2526. }
  2527. }
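// A minimal sketch (hypothetical free helper) of the same conversion for a single tensor: only the
// shape and data type are carried over, mirroring ConstructGeInput above; the field types of
// InputTensorInfo are assumed to match their usage in that function.
static GeTensor MakeGeTensorFromInput(const InputTensorInfo &input) {
  GeTensorDesc desc(GeShape(input.dims));
  desc.SetDataType(static_cast<ge::DataType>(input.data_type));
  return GeTensor(desc);  // no data buffer is attached here, matching the usage above
}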
  2528. Status GraphManager::CheckIncreBuildAndPreRun(GraphManager *graph_manager, const PreRunArgs &args,
  2529. GraphNodePtr &graph_node, GeRootModelPtr &ge_root_model) {
  2530. if (!graph_manager->IsGraphNeedBuild(graph_node)) {
  2531. ge_root_model = graph_node->GetGeRootModel();
  2532. return SUCCESS;
  2533. }
  2534. if (graph_node->GetBuildFlag()) {
  2535. ReturnError(graph_manager, args.callback, PARAM_INVALID,
  2536. "The graph " + std::to_string(graph_node->GetGraphId()) +
  2537. " need to re-build, you should remove it"
  2538. " from GE first, then AddGraph again and rebuild it.");
  2539. graph_node->Unlock();
  2540. return PARAM_INVALID;
  2541. }
  2542. // check need incre build.
  2543. GeModelPtr ge_model = nullptr;
  2544. if (graph_manager->IncreBuild(graph_node, ge_model) != SUCCESS) {
  2545. std::vector<GeTensor> ge_inputs;
  2546. ConstructGeInput(args.input_tensor, ge_inputs);
  2547. Status ret = graph_manager->PreRun(graph_node, ge_inputs, ge_root_model, args.session_id);
  2548. // release rts generate context
  2549. RtContextUtil::GetInstance().DestroyRtContexts(args.session_id, graph_node->GetGraphId());
  2550. if (ret != SUCCESS) {
  2551. ReturnError(graph_manager, args.callback, ret, "PreRun Failed.");
  2552. return ret;
  2553. }
  2554. }
  2555. graph_node->SetBuildFlag(true);
  2556. graph_manager->var_acc_ctrl_.SetGraphBuildEnd(graph_node->GetGraphId());
  2557. return SUCCESS;
  2558. }
  2559. void GraphManager::PreRunThread(GraphManager *graph_manager) {
  2560. if (prctl(PR_SET_NAME, ("GE_PreRun")) != 0) {
  2561. GELOGW("Set thread name failed.");
  2562. }
  2563. PreRunArgs args;
  2564. while (graph_manager->thread_run_flag_) {
  2565. bool pop_status = graph_manager->prerun_args_q_.Pop(args);
  2566. if (!pop_status) {
  2567. continue;
  2568. }
  2569. GELOGI("[PreRunThread] A new loop start, graph_id:%u.", args.graph_id);
  2570. ErrorManager::GetInstance().SetErrorContext(args.error_context);
  2571. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
  2572. GetContext().SetSessionId(args.session_id);
  2573. GetThreadLocalContext() = args.context;
  2574. graph_manager->UpdateLocalOmgContext(args.graph_id);
  2575. // find graph
  2576. GraphNodePtr graph_node = nullptr;
  2577. Status ret = graph_manager->GetGraphNode(args.graph_id, graph_node);
  2578. if (ret != SUCCESS) {
  2579. ReturnError(graph_manager, args.callback, GE_GRAPH_ALREADY_RUNNING,
  2580. "[RunGraph] graph not exist, graph_id=" + std::to_string(args.graph_id));
  2581. return;
  2582. }
2583. // more than one graph may own the same graph_id
  2584. uint32_t count = 0;
  2585. if (graph_manager->GetGraphCount(args.graph_id, count) != SUCCESS) {
  2586. GELOGE(INTERNAL_ERROR, "Get graph [id:%u] count failed.", args.graph_id);
  2587. return;
  2588. }
2589. // Avoid repeatedly prerunning graphs that own the same graph_id in online inference concurrency
2590. if (count > 1 && graph_node->GetBuildFlag()) {
2591. graph_node->Lock();
2592. GELOGD("Avoid repeated prerun, graph_id:%u.", args.graph_id);
2593. // In the online inference concurrency scenario, graph_node is allowed to be locked 'count' times
  2594. graph_node->SetSemSize(count);
  2595. graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
  2596. args.input_tensor, graph_node->GetGeRootModel(), GetThreadLocalContext(), args.callback }));
  2597. GELOGI("[PreRunThread] Loop end. Start to run with cached build model.");
  2598. continue;
  2599. }
2600. // This cannot be moved ahead of the repeated-prerun check above
  2601. graph_node->Lock();
  2602. if (graph_node->GetRunFlag()) {
  2603. ReturnError(graph_manager, args.callback, GE_GRAPH_GRAPH_NODE_NULL,
  2604. "[RunGraph] graph already running, graph id=" + std::to_string(args.graph_id));
  2605. graph_node->Unlock();
  2606. return;
  2607. }
  2608. // set graph's run flag
  2609. graph_node->SetRunFlag(true);
  2610. ComputeGraphPtr compute_graph_tmp = GraphUtils::GetComputeGraph(*(graph_node->GetGraph()));
  2611. if (compute_graph_tmp == nullptr) {
  2612. ReturnError(graph_manager, args.callback, GE_GRAPH_GRAPH_NODE_NULL,
  2613. "[RunGraph] compute_graph_tmp is NULL, graph id = %u.");
  2614. graph_node->Unlock();
  2615. return;
  2616. }
2617. // when incre build is enabled, save the cache helper.
  2618. graph_manager->AddModelCacheHelperToMap(args.graph_id, args.session_id, compute_graph_tmp);
  2619. std::vector<GeModelPtr> ge_models;
  2620. if (graph_manager->options_.local_fmk_op_flag) {
  2621. graph_manager->GetCompilerStages(graph_node->GetGraphId()).optimizer.TranFrameOp(compute_graph_tmp);
  2622. }
2623. // graph preprocess, optimize, partition and build are skipped if the graph has already been built successfully.
  2624. GELOGI("Start for run graph async.");
  2625. GeRootModelPtr ge_root_model = nullptr;
  2626. ret = CheckIncreBuildAndPreRun(graph_manager, args, graph_node, ge_root_model);
  2627. if (ret != SUCCESS) {
  2628. graph_node->SetRunFlag(false);
  2629. if (!ge::Analyzer::GetInstance()->IsEnableNetAnalyzeDebug()) {
  2630. ReturnError(graph_manager, args.callback, ret, "CheckIncreBuildAndPreRun Failed, thread exit..");
  2631. graph_node->Unlock();
  2632. return;
  2633. } else {
  2634. ReturnError(graph_manager, graph_node, args.callback, ret,
  2635. "CheckIncreBuildAndPreRun Failed, keep geop continue!");
  2636. graph_node->Unlock();
  2637. continue;
  2638. }
  2639. }
  2640. graph_manager->run_args_q_.Push(RunArgs( { graph_node, args.graph_id, args.session_id, args.error_context,
  2641. args.input_tensor, ge_root_model, GetThreadLocalContext(), args.callback }));
  2642. GELOGI("[PreRunThread] Loop end.");
  2643. }
  2644. }
  2645. void GraphManager::ParseInputsDimsForData(const std::vector<InputTensorInfo> &input_tensor) {
  2646. GELOGD("Start parse input dims from data.");
  2647. for (size_t i = 0; i < input_tensor.size(); ++i) {
  2648. std::vector<int64_t> dynamic_dim;
  2649. for (size_t j = 0; j < input_tensor[i].dims.size(); ++j) {
  2650. dynamic_dim.emplace_back(input_tensor[i].dims[j]);
  2651. }
  2652. GELOGD("Input tensor dims is %s.", formats::JoinToString(dynamic_dim).c_str());
  2653. GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor[i].dims);
  2654. }
  2655. }
  2656. Status GraphManager::ParseInputsDimsForGetNexNosinkAndData(const vector<NodePtr> &dynamic_nodes,
  2657. const std::vector<InputTensorInfo> &input_tensor) {
  2658. GELOGD("Start parse inputs dims when coexist data and getnext sink.");
  2659. for (size_t i = 0; i < dynamic_nodes.size(); ++i) {
  2660. auto op_desc = dynamic_nodes.at(i)->GetOpDesc();
  2661. if (op_desc == nullptr) {
  2662. continue;
  2663. }
  2664. GeAttrValue::INT index = 0;
  2665. if (!(AttrUtils::GetInt(op_desc, ATTR_NAME_INDEX, index))) {
  2666. GELOGE(PARAM_INVALID, "Get index from attr failed");
  2667. return PARAM_INVALID;
  2668. }
2669. if (static_cast<size_t>(index) >= input_tensor.size()) {
2670. GELOGE(PARAM_INVALID, "Data index %ld is out of range, input tensor count is %zu.", index, input_tensor.size());
2671. return PARAM_INVALID;
2672. }
2673. GetLocalOmgContext().user_real_input_dims.emplace_back(input_tensor.at(index).dims);
2674. GELOGI("Shape dims of data[%ld] is %s.", index, formats::JoinToString(input_tensor.at(index).dims).c_str());
  2675. }
  2676. return SUCCESS;
  2677. }
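// ParseInputsDims below dispatches on how dynamic inputs are fed, based on the local OMG context:
//   - dynamic_node_type == DATA:
//       * no getnext_nosink nodes: data only (or data + getnext_sink), take dims from every input tensor;
//       * getnext_nosink present: take dims from the data nodes only.
//   - otherwise (dynamic type is getnext):
//       * no getnext_nosink nodes: getnext_sink case, dims are obtained later from the aicpu op GETDYNAMICDIMS;
//       * getnext_nosink present and no data nodes: take dims from every input tensor;
//       * getnext_nosink present together with data nodes: take dims from the getnext_nosink nodes only.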
  2678. Status GraphManager::ParseInputsDims(const std::vector<InputTensorInfo> &input_tensor) {
  2679. GELOGI("Start parse input dims of %zu input tensor.", input_tensor.size());
  2680. GetLocalOmgContext().user_real_input_dims.clear();
  2681. if (!GetLocalOmgContext().dynamic_node_type.empty()) {
  2682. vector<NodePtr> data_nodes;
  2683. vector<NodePtr> getnext_nosink_nodes;
  2684. data_nodes = GetLocalOmgContext().data_nodes;
  2685. getnext_nosink_nodes = GetLocalOmgContext().getnext_nosink_nodes;
  2686. GELOGD("Data nodes count is %zu, getnext nosink nodes count is %zu.", data_nodes.size(),
  2687. getnext_nosink_nodes.size());
  2688. if (GetLocalOmgContext().dynamic_node_type == DATA) {
  2689. if (getnext_nosink_nodes.empty()) {
  2690. // just data or data+getnext_sink
  2691. ParseInputsDimsForData(input_tensor);
  2692. } else {
  2693. // data+getnext_nosink, but only need to get shape_dims of data
  2694. if (ParseInputsDimsForGetNexNosinkAndData(data_nodes, input_tensor) != SUCCESS) {
  2695. GELOGE(PARAM_INVALID, "Failed to parse dims from data, when data coexist with getnext nosink.");
  2696. return PARAM_INVALID;
  2697. }
  2698. }
  2699. } else {
  2700. if (getnext_nosink_nodes.empty()) {
  2701. // just getnext_sink or getnext_sink+data, need to get shape_dims from aicpu op
  2702. GELOGI("Need to get dims from aicpu op: GETDYNAMICDIMS.");
  2703. return SUCCESS;
  2704. } else {
  2705. if (data_nodes.empty()) {
  2706. // just getnext_nosink
  2707. ParseInputsDimsForData(input_tensor);
  2708. } else {
  2709. // getnext_nosink + data, but only need to get shape_dims of getnext_nosink
  2710. if (ParseInputsDimsForGetNexNosinkAndData(getnext_nosink_nodes, input_tensor) != SUCCESS) {
  2711. GELOGE(PARAM_INVALID, "Failed to parse dims from getnext nosink, when data coexist with getnext nosink");
  2712. return PARAM_INVALID;
  2713. }
  2714. }
  2715. }
  2716. }
  2717. }
  2718. GELOGI("Parse %zu inputs dims success.", GetLocalOmgContext().user_real_input_dims.size());
  2719. return SUCCESS;
  2720. }
  2721. void GraphManager::RunThread(GraphManager *graph_manager) {
  2722. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelExecute, ErrorMessage::kModelExecute);
  2723. if (prctl(PR_SET_NAME, ("GE_Run")) != 0) {
  2724. GELOGW("Set thread name failed.");
  2725. }
  2726. RunArgs args;
  2727. while (graph_manager->thread_run_flag_) {
  2728. bool pop_status = graph_manager->run_args_q_.Pop(args);
  2729. if (!pop_status) {
  2730. continue;
  2731. }
  2732. GELOGI("[RunThread] A new loop start, graph_id:%u.", args.graph_id);
  2733. ErrorManager::GetInstance().SetErrorContext(args.error_context);
  2734. GetContext().SetSessionId(args.session_id);
  2735. GetThreadLocalContext() = args.context;
  2736. graph_manager->UpdateLocalOmgContext(args.graph_id);
  2737. Status ret;
  2738. // parse inputs.dims to vector<vector<uint64_t>> dynamic_dims
  2739. ret = graph_manager->ParseInputsDims(args.input_tensor);
  2740. if (ret != SUCCESS) {
  2741. ReturnError(graph_manager, args.callback, ret, "ParseInputsDims failed, thread exit.");
  2742. args.graph_node->Unlock();
  2743. return;
  2744. }
  2745. args.graph_node->UpdateLoadFlag();
  2746. if (!args.graph_node->GetLoadFlag()) {
  2747. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelLoad, ErrorMessage::kModelLoad);
  2748. args.ge_root_model->SetTrainFlag(graph_manager->GetTrainFlag());
  2749. ret = graph_manager->LoadGraphAsync(args.ge_root_model, args.graph_node);
  2750. if (ret != SUCCESS || args.ge_root_model == nullptr) {
  2751. StopQueue(graph_manager);
  2752. ReturnError(graph_manager, args.callback, ret, "LoadGraphAsync failed, thread exit.");
  2753. args.graph_node->Unlock();
  2754. return;
  2755. }
  2756. // control the times of graph loading in multi-thread scenario
  2757. args.graph_node->DecreaseLoadCount();
  2758. args.graph_node->IncreaseLoadRecord();
  2759. args.graph_node->SetLoadFlag(true);
  2760. GELOGI("LoadGraph[%u], model[%u] success and set LoadFlag to true.", args.graph_node->GetGraphId(),
  2761. args.ge_root_model->GetModelId());
  2762. }
  2763. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelExecute, ErrorMessage::kModelExecute);
  2764. if (graph_manager->GetTrainFlag()) {
  2765. ret = graph_manager->graph_executor_.SetGraphContext(graph_manager->GetGraphContext());
  2766. if (ret != SUCCESS) {
  2767. GELOGW("[GraphManager] SetGraphContext failed, graph_id=%u.", args.graph_id);
  2768. }
  2769. graph_manager->graph_executor_.SetTrainFlag(graph_manager->options_.train_graph_flag);
  2770. }
  2771. ret = graph_manager->graph_executor_.ExecuteGraphAsync(args.graph_id, args.graph_node->GetGeRootModel(),
  2772. args.input_tensor, args.callback);
  2773. args.graph_node->SetRunFlag(false);
  2774. if (ret != SUCCESS) {
  2775. ReturnError(graph_manager, args.callback, ret, "ExecuteGraphAsync failed, thread exit.");
  2776. args.graph_node->Unlock();
  2777. return;
  2778. }
  2779. args.graph_node->Unlock();
  2780. GELOGI("[GraphManager] Run graph async success, graph_id=%u.", args.graph_id);
  2781. }
  2782. }
  2783. void GraphManager::StopQueue(GraphManager *graph_manager) {
  2784. if (graph_manager == nullptr) {
  2785. return;
  2786. }
  2787. graph_manager->thread_run_flag_.store(false);
  2788. graph_manager->prerun_args_q_.Stop();
  2789. graph_manager->run_args_q_.Stop();
  2790. }
  2791. void GraphManager::ReturnError(GraphManager *graph_manager, RunAsyncCallback callback, Status ret, const string &log) {
  2792. if (graph_manager == nullptr) {
  2793. return;
  2794. }
  2795. StopQueue(graph_manager);
  2796. GELOGE(ret, "%s.", log.c_str());
  2797. std::vector<ge::OutputTensorInfo> outputs;
  2798. callback(ret, outputs);
  2799. }
  2800. void GraphManager::ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node,
  2801. RunAsyncCallback callback, Status ret, const string &log) {
  2802. std::vector<ge::OutputTensorInfo> outputs;
  2803. auto compute_graph = GraphUtils::GetComputeGraph(*graph_node->GetGraph());
  2804. if (graph_manager == nullptr || compute_graph == nullptr) {
  2805. GELOGE(GRAPH_FAILED, "[Analyze Mode] compute graph is null!");
  2806. callback(GRAPH_FAILED, outputs);
  2807. return;
  2808. }
  2809. for (const auto &node : compute_graph->GetAllNodes()) {
  2810. if (node->GetType() != "NetOutput") {
  2811. continue;
  2812. }
  2813. for (size_t i = 0; i < node->GetAllInDataAnchorsSize(); i++) {
  2814. auto input_desc = node->GetOpDesc()->MutableInputDesc(i);
  2815. ge::OutputTensorInfo tensor;
  2816. tensor.dims = input_desc->GetShape().GetDims();
  2817. tensor.data_type = static_cast<uint32_t>(input_desc->GetDataType());
  2818. int64_t len = 1;
  2819. if (input_desc->GetShape().GetDims() != std::vector<int64_t>({})) {
  2820. len = input_desc->GetShape().GetShapeSize();
  2821. }
  2822. if (len < 0) {
  2823. GELOGE(GRAPH_FAILED, "Analyze Mode does not support GEOP output unknown shape!");
  2824. callback(GRAPH_FAILED, outputs);
  2825. return;
  2826. } else if (len == 0) {
  2827. GELOGI("getted shape size is 0.Do process as empty tensor!");
  2828. len = 1;
  2829. }
  2830. auto size = GetSizeByDataType(input_desc->GetDataType());
  2831. if (size <= 0) {
  2832. GELOGE(PARAM_INVALID, "Failed to get cube size, the data type %s is invalid",
  2833. ge::TypeUtils::DataTypeToSerialString(input_desc->GetDataType()).c_str());
  2834. callback(GRAPH_FAILED, outputs);
  2835. return;
  2836. }
2837. if (!CheckInt64MulOverflow(len, static_cast<int64_t>(size))) {
2838. GELOGE(MEMALLOC_FAILED, "int64 multiplication overflow! a:%ld b:%d", len, size);
  2839. callback(GRAPH_FAILED, outputs);
  2840. return;
  2841. }
  2842. tensor.length = len * size;
  2843. tensor.data.reset(new(std::nothrow) uint8_t[tensor.length]);
2844. // To avoid the global step being too small to stop, fill the whole tensor with a large value
2845. for (int64_t i = 0; i < tensor.length; i++) {
2846. tensor.data[i] = 0x7F; // 0x7F is the largest positive value of a signed byte
  2847. }
  2848. outputs.emplace_back(std::move(tensor));
  2849. }
  2850. }
  2851. callback(SUCCESS, outputs);
  2852. return;
  2853. }
  2854. bool GraphManager::IsGraphNeedRebuild(uint32_t graph_id) {
  2855. // find graph
  2856. GraphNodePtr graph_node = nullptr;
  2857. Status ret = GetGraphNode(graph_id, graph_node);
  2858. if (ret != SUCCESS) {
  2859. GELOGE(ret, "[RunGraph] graph not exist, graph_id=%u.", graph_id);
  2860. return true;
  2861. }
  2862. if (graph_node == nullptr) {
  2863. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[RunGraph] graph node is NULL, graphId=%u.", graph_id);
  2864. return true;
  2865. }
  2866. return IsGraphNeedBuild(graph_node);
  2867. }
  2868. bool GraphManager::IsGraphNeedBuild(const GraphNodePtr &graph_node) {
  2869. return !graph_node->GetBuildFlag() || var_acc_ctrl_.IsGraphNeedRebuild(graph_node->GetGraphId());
  2870. }
  2871. const map<std::string, std::string> *GraphManager::GetGraphOptions(uint32_t graph_id) {
  2872. GraphNodePtr graph_node = nullptr;
  2873. Status ret = GetGraphNode(graph_id, graph_node);
  2874. if (ret != SUCCESS) {
  2875. GELOGE(ret, "[RunGraph] graph not exist, graph_id=%u.", graph_id);
  2876. return nullptr;
  2877. }
  2878. if (!graph_node) {
  2879. GELOGE(GE_GRAPH_GRAPH_NODE_NULL, "[RunGraph] graph node is NULL, graph_id=%u.", graph_id);
  2880. return nullptr;
  2881. }
  2882. return &(graph_node->GetOptions());
  2883. }
  2884. void GraphManager::SetOptionsRunGraphFlag(bool run_graph_flag) { options_.run_graph_flag = run_graph_flag; }
  2885. Status GraphManager::OptimizeSubgraph(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph,
  2886. uint64_t session_id) {
  2887. // graph partition
  2888. // Stage partition, only for root graph
  2889. GE_TIMESTAMP_START(StagePartition);
  2890. StagePartitioner stage_partitioner(compute_graph);
  2891. auto ret = stage_partitioner.Partition();
  2892. if (ret != SUCCESS) {
  2893. GELOGE(ret, "Graph partition by stage Failed");
  2894. return ret;
  2895. }
  2896. GE_TIMESTAMP_EVENT_END(StagePartition, "OptimizeSubgraph::StagePartition");
  2897. // all sub graph list of root graph and sub graph
  2898. GE_TIMESTAMP_START(GraphPartitionDynamicShape);
  2899. DynamicShapePartitioner dynamic_shape_partitioner(compute_graph);
  2900. ret = dynamic_shape_partitioner.Partition();
  2901. if (ret != SUCCESS) {
  2902. GELOGE(ret, "Graph partition by dynamic shape Failed");
  2903. return ret;
  2904. }
  2905. bool dynamic_shape_partitioned = false;
  2906. if (!AttrUtils::GetBool(*compute_graph, ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED, dynamic_shape_partitioned)) {
  2907. GELOGE(FAILED, "failed get dynamic shape partitioned flag on partitioned graph.");
  2908. return FAILED;
  2909. }
  2910. GE_TIMESTAMP_EVENT_END(GraphPartitionDynamicShape, "OptimizeSubgraph::GraphPartitionDynamicShape");
  2911. GE_DUMP(compute_graph, "AfterDynamicShapePartition");
  2912. GE_TIMESTAMP_START(GraphPartition);
  2913. GraphPartitioner &partitioner = GetCompilerStages(graph_node->GetGraphId()).partitioner;
  2914. ret = partitioner.Partition(compute_graph, GraphPartitioner::kPartitioning);
  2915. if (ret != SUCCESS) {
  2916. GELOGE(ret, "Graph partition Failed");
  2917. return ret;
  2918. }
  2919. GE_TIMESTAMP_EVENT_END(GraphPartition, "OptimizeSubgraph::Partition1");
  2920. GE_TIMESTAMP_START(SetSubgraph);
  2921. ret = SetSubgraph(session_id, compute_graph, partitioner);
  2922. if (ret != SUCCESS) {
  2923. GELOGE(ret, "Graph set subgraph Failed");
  2924. return ret;
  2925. }
  2926. GE_TIMESTAMP_EVENT_END(SetSubgraph, "OptimizeSubgraph::SetSubGraph");
  2927. if ((options_.build_mode == BUILD_MODE_TUNING) &&
  2928. (options_.build_step == BUILD_STEP_BEFORE_UB_MATCH || options_.build_step == BUILD_STEP_AFTER_BUILDER ||
  2929. options_.build_step == BUILD_STEP_AFTER_BUILDER_SUB)) {
  2930. GE_TIMESTAMP_START(ConvertGraphToFile);
  2931. std::string tuning_path;
  2932. (void) GetContext().GetOption(TUNING_PATH, tuning_path);
  2933. Status ret = ConvertGraphToFile(compute_graph, partitioner, tuning_path,
  2934. (options_.build_step == BUILD_STEP_AFTER_BUILDER));
  2935. if (ret != SUCCESS) {
  2936. GELOGE(ret, "Convert graph[%s] to file failed", compute_graph->GetName().c_str());
  2937. return ret;
  2938. }
  2939. GE_TIMESTAMP_EVENT_END(ConvertGraphToFile, "OptimizeSubgraph::ConvertGraphToFile");
  2940. return SUCCESS;
  2941. }
  2942. ComputeGraphPtr merged_compute_graph = nullptr;
  2943. std::vector<ComputeGraphPtr> merged_sub_graph_list;
  2944. GE_TIMESTAMP_START(MergeSubgraph);
  2945. ret = MergeSubGraph(merged_compute_graph, compute_graph, graph_node->GetGraphId());
  2946. if (ret != SUCCESS) {
  2947. GELOGE(ret, "Merge SubGraph Failed");
  2948. return ret;
  2949. }
  2950. GE_CHECK_NOTNULL(merged_compute_graph);
  2951. merged_compute_graph->SetSessionID(session_id);
  2952. merged_compute_graph->SetGraphID(graph_node->GetGraphId());
  2953. merged_compute_graph->SetNeedIteration(compute_graph->GetNeedIteration());
  2954. for (auto &sub_graph : merged_compute_graph->GetAllSubgraphs()) {
  2955. sub_graph->SetSessionID(session_id);
  2956. sub_graph->SetGraphID(graph_node->GetGraphId());
  2957. }
  2958. bool off_superkernel = false;
  2959. if (AttrUtils::GetBool(compute_graph, ATTR_NAME_OFF_SUPERKERNEL_ATTR, off_superkernel)) {
  2960. GELOGI("Compute graph %s get superkernel flag %d.", compute_graph->GetName().c_str(), off_superkernel);
  2961. if (!AttrUtils::SetBool(merged_compute_graph, ATTR_NAME_OFF_SUPERKERNEL_ATTR, off_superkernel)) {
  2962. GELOGE(FAILED, "Compute graph %s set superkernel flag %d failed", merged_compute_graph->GetName().c_str(),
  2963. off_superkernel);
  2964. return FAILED;
  2965. }
  2966. }
  2967. GE_TIMESTAMP_EVENT_END(MergeSubgraph, "OptimizeSubgraph::MergeSubGraph");
  2968. GE_DUMP(merged_compute_graph, "mergedComputeGraph");
  2969. compute_graph = merged_compute_graph;
  2970. if (!AttrUtils::SetBool(*compute_graph, ATTR_NAME_DYNAMIC_SHAPE_PARTITIONED, dynamic_shape_partitioned)) {
  2971. GELOGE(FAILED, "failed set dynamic shape partitioned flag on partitioned graph.");
  2972. return FAILED;
  2973. }
  2974. return SUCCESS;
  2975. }
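// Summary of OptimizeSubgraph above: the root graph is first partitioned by stage and by dynamic
// shape, then split per engine by the GraphPartitioner; SetSubgraph optimizes each subgraph on its
// engine. In tuning build mode the partitioned graphs are dumped to file and the function returns
// early; otherwise the optimized subgraphs are merged back into a single compute graph that replaces
// the caller's compute_graph.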
  2976. Status GraphManager::ConvertGraphToFile(ComputeGraphPtr &compute_graph, GraphPartitioner &partitioner, std::string path,
  2977. bool exe_flag) {
  2978. GE_CHECK_NOTNULL(compute_graph);
  2979. GELOGI("compute_graph [%s] path [%s] Enter ConvertGraphToFile.", compute_graph->GetName().c_str(), path.c_str());
  2980. std::vector<ComputeGraphPtr> non_tuning_subgraphs;
  2981. auto input_node_sub_graph_map = partitioner.graph_2_input_subgraph_;
  2982. const auto &input_subgraph_info = input_node_sub_graph_map[compute_graph];
  2983. GE_CHECK_NOTNULL(input_subgraph_info);
  2984. ComputeGraphPtr input_graph_tmp = input_subgraph_info->GetSubGraph();
  2985. non_tuning_subgraphs.push_back(input_graph_tmp);
  2986. auto sub_graph_map = partitioner.GetSubGraphMap();
  2987. const auto &subgraph_infos = sub_graph_map[compute_graph];
  2988. std::vector<ComputeGraphPtr> tuning_subgraphs;
  2989. for (const auto &sub_graph_info_ptr: subgraph_infos) {
  2990. GE_CHECK_NOTNULL(sub_graph_info_ptr);
  2991. ComputeGraphPtr sub_graph_tmp = sub_graph_info_ptr->GetSubGraph();
  2992. // need to tuning
  2993. if (sub_graph_info_ptr->GetEngineName() == kVectorEngine || sub_graph_info_ptr->GetEngineName() == kAIcoreEngine) {
  2994. tuning_subgraphs.push_back(sub_graph_tmp);
  2995. } else {
  2996. non_tuning_subgraphs.push_back(sub_graph_tmp);
  2997. }
  2998. }
  2999. // for function graphs to tune
  3000. for (auto &function_graph : compute_graph->GetAllSubgraphs()) {
  3001. auto subgraph_list = sub_graph_map[function_graph];
  3002. for (const auto &sub_graph_info_ptr : subgraph_list) {
  3003. GE_CHECK_NOTNULL(sub_graph_info_ptr);
  3004. ComputeGraphPtr sub_graph_tmp = sub_graph_info_ptr->GetSubGraph();
  3005. // need to tuning
  3006. if (sub_graph_info_ptr->GetEngineName() == kVectorEngine ||
  3007. sub_graph_info_ptr->GetEngineName() == kAIcoreEngine) {
  3008. tuning_subgraphs.push_back(sub_graph_tmp);
  3009. } else {
  3010. non_tuning_subgraphs.push_back(sub_graph_tmp);
  3011. }
  3012. }
  3013. }
  3014. return TuningUtils::ConvertGraphToFile(tuning_subgraphs, non_tuning_subgraphs, exe_flag, path);
  3015. }
  3016. Status GraphManager::Build(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph,
  3017. GeRootModelPtr &ge_root_model, uint64_t session_id) {
  3018. ErrorManager::GetInstance().SetStage(ErrorMessage::kModelCompile, ErrorMessage::kOther);
  3019. // build
  3020. if (compute_graph != nullptr) {
  3021. std::string graph_name = compute_graph->GetName();
  3022. graph_name.append("_");
  3023. graph_name.append(std::to_string(graph_node->GetGraphId()));
  3024. compute_graph->SetName(graph_name);
  3025. }
  3026. auto ret = GetCompilerStages(graph_node->GetGraphId()).builder.Build(compute_graph, ge_root_model, session_id);
  3027. if (ret != SUCCESS) {
  3028. GELOGE(ret, "SubGraph build Failed.");
  3029. return ret;
  3030. }
  3031. bool is_always_dump = false;
  3032. if (!DumpManager::GetInstance().GetDumpProperties(session_id).GetDumpPath().empty()) {
  3033. is_always_dump = true;
  3034. }
  3035. GraphUtils::DumpGEGraph(compute_graph, "Build", is_always_dump);
  3036. GraphUtils::DumpGEGraphToOnnx(*compute_graph, "Build");
  3037. graph_node->SetGeRootModel(ge_root_model);
  3038. return SUCCESS;
  3039. }
  3040. Status GraphManager::GenCheckPointGraph(const std::map<std::string, GeTensorDesc> &all_variables, Graph &graph) {
  3041. ge::ComputeGraphPtr compute_graph = MakeShared<ComputeGraph>(kCheckPointGraph);
  3042. GE_CHECK_NOTNULL(compute_graph);
  3043. OpDescPtr save_desc = MakeShared<ge::OpDesc>(compute_graph->GetName() + "_" + kSave, kSave);
  3044. GE_CHECK_NOTNULL(save_desc);
  3045. uint32_t save_index = 0;
  3046. for (auto iter = all_variables.begin(); iter != all_variables.end(); ++iter) {
  3047. GE_CHK_GRAPH_STATUS_RET(save_desc->AddInputDesc(save_index, iter->second));
  3048. save_index++;
  3049. }
  3050. NodePtr save_node = compute_graph->AddNode(save_desc);
  3051. uint32_t index = 0;
  3052. for (auto iter = all_variables.begin(); iter != all_variables.end(); ++iter) {
  3053. OpDescPtr var_desc = MakeShared<ge::OpDesc>(iter->first, VARIABLE);
  3054. GE_CHECK_NOTNULL(var_desc);
  3055. if (!AttrUtils::SetBool(var_desc, kCheckPointForGetVar, true)) {
  3056. GELOGW("Set check point graph attr failed.");
  3057. }
  3058. GE_CHK_GRAPH_STATUS_RET(var_desc->AddOutputDesc(iter->second));
  3059. NodePtr var_node = compute_graph->AddNode(var_desc);
  3060. GE_CHK_STATUS(GraphUtils::AddEdge(var_node->GetOutDataAnchor(0), save_node->GetInDataAnchor(index)),
  3061. "Add edge[%s->%s] fail.", var_node->GetName().c_str(), save_node->GetName().c_str());
  3062. index++;
  3063. }
  3064. compute_graph->Dump();
  3065. graph = GraphUtils::CreateGraphFromComputeGraph(compute_graph);
  3066. return SUCCESS;
  3067. }
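// The checkpoint graph produced above contains one VARIABLE node per entry of all_variables, each
// wired into one input of a single Save node:
//
//   var_0   var_1   ...   var_n
//      \      |            /
//       +---- Save -------+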
  3068. Status GraphManager::SaveVariables(const Graph &graph, const std::vector<std::string> &var_names,
  3069. const std::vector<Tensor> &outputs, std::vector<Tensor> &var_values) {
  3070. map<string, Tensor> var_results;
  3071. GE_CHK_STATUS_RET(SaveCheckPointResult(graph, outputs, var_results), "Save check point result failed.");
  3072. if (!var_names.empty()) {
  3073. for (const auto &var_name : var_names) {
  3074. if (var_results.count(var_name) == 0) {
  3075. GELOGE(FAILED, "Fetch var[%s] value failed.", var_name.c_str());
  3076. return FAILED;
  3077. } else {
  3078. auto var_tensor = var_results[var_name].GetTensorDesc();
  3079. var_tensor.SetName(var_name.c_str());
  3080. var_results[var_name].SetTensorDesc(var_tensor);
  3081. var_values.emplace_back(var_results[var_name]);
  3082. }
  3083. }
  3084. } else {
  3085. for (auto iter = var_results.begin(); iter != var_results.end(); ++iter) {
  3086. string var_name = iter->first;
  3087. auto var_tensor = iter->second.GetTensorDesc();
  3088. var_tensor.SetName(var_name.c_str());
  3089. iter->second.SetTensorDesc(var_tensor);
  3090. var_values.emplace_back(iter->second);
  3091. }
  3092. }
  3093. return SUCCESS;
  3094. }
  3095. Status GraphManager::SaveCheckPointResult(const Graph &graph, const std::vector<Tensor> &outputs,
  3096. map<string, Tensor> &var_results) {
  3097. auto compute_graph = GraphUtils::GetComputeGraph(graph);
  3098. NodePtr netoutput_node = nullptr;
  3099. for (const auto &node : compute_graph->GetAllNodes()) {
  3100. if (node->GetType() == NETOUTPUT) {
  3101. netoutput_node = node;
  3102. break;
  3103. }
  3104. }
  3105. GE_CHECK_NOTNULL(netoutput_node);
  3106. for (const auto &in : netoutput_node->GetAllInDataAnchors()) {
  3107. auto out_anchor = in->GetPeerOutAnchor();
  3108. GE_CHECK_NOTNULL(out_anchor);
  3109. auto peer_node = out_anchor->GetOwnerNode();
  3110. while (peer_node->GetType() != VARIABLE) {
  3111. if (peer_node->GetAllInDataAnchors().size() != 1) {
  3112. GELOGE(FAILED, "peer_node [%s] has more than 1 input in checkpoint Graph.", peer_node->GetName().c_str());
  3113. return FAILED;
  3114. }
  3115. auto peer_node_in_anchor = peer_node->GetAllInDataAnchors().at(0);
  3116. auto peer_node_out_anchor = peer_node_in_anchor->GetPeerOutAnchor();
  3117. if (peer_node_out_anchor != nullptr) {
  3118. peer_node = peer_node_out_anchor->GetOwnerNode();
  3119. if (peer_node->GetType() == VARIABLE) {
  3120. break;
  3121. }
  3122. }
  3123. }
  3124. if (peer_node->GetType() != VARIABLE) {
  3125. GELOGE(FAILED, " peer_node %s is not variable in checkpoint Graph.", peer_node->GetName().c_str());
  3126. return FAILED;
  3127. }
  3128. auto var_name = peer_node->GetName();
  3129. GELOGI("[GraphManager] SaveVariables, varName is %s.", var_name.c_str());
  3130. if (in->GetIdx() >= static_cast<int>(outputs.size())) {
  3131. GELOGE(FAILED, "variable index[%d] out of range[%zu].", in->GetIdx(), outputs.size());
  3132. return FAILED;
  3133. }
  3134. var_results.emplace(var_name, outputs.at(in->GetIdx()));
  3135. }
  3136. return SUCCESS;
  3137. }
  3138. void GraphManager::AddLocalOmgContext(GraphId graph_id, const OmgContext &omg_context) {
  3139. std::lock_guard<std::mutex> lock(member_mutex_);
  3140. omg_contexts_.emplace(graph_id, omg_context);
  3141. SetLocalOmgContext(omg_contexts_[graph_id]);
  3142. }
  3143. void GraphManager::UpdateLocalOmgContext(GraphId graph_id) {
  3144. std::lock_guard<std::mutex> lock(member_mutex_);
  3145. auto iter = omg_contexts_.find(graph_id);
  3146. if (iter != omg_contexts_.end()) {
  3147. SetLocalOmgContext(iter->second);
  3148. } else {
  3149. GELOGW("OmgContext of graph %u not found.", graph_id);
  3150. }
  3151. }
  3152. GraphManager::CompilerStages &GraphManager::GetCompilerStages(GraphId graph_id) {
  3153. std::lock_guard<std::mutex> lock(member_mutex_);
  3154. return compiler_stages_[graph_id];
  3155. }
  3156. void GraphManager::RemoveCompilerStages(GraphId graph_id) {
  3157. std::lock_guard<std::mutex> lock(member_mutex_);
  3158. compiler_stages_.erase(graph_id);
  3159. }
  3160. void GraphManager::IncreaseGraphCount(GraphId graph_id) {
  3161. std::lock_guard<std::mutex> lock(graph_count_mutex_);
  3162. auto it = graph_count_.find(graph_id);
  3163. if (it == graph_count_.end()) {
  3164. graph_count_.insert({graph_id, kInitGraphCount});
  3165. GELOGD("After increaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
  3166. } else {
  3167. ++graph_count_[graph_id];
  3168. GELOGD("After increaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
  3169. }
  3170. }
  3171. void GraphManager::RemoveGraphCount(GraphId graph_id) {
  3172. std::lock_guard<std::mutex> lock(graph_count_mutex_);
  3173. auto it = graph_count_.find(graph_id);
  3174. if (it == graph_count_.end()) {
  3175. GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id);
  3176. } else {
  3177. GELOGD("RemoveGraphCount success, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
  3178. graph_count_.erase(it);
  3179. }
  3180. }
  3181. void GraphManager::DecreaseGraphCount(GraphId graph_id) {
  3182. std::lock_guard<std::mutex> lock(graph_count_mutex_);
  3183. auto it = graph_count_.find(graph_id);
  3184. if (it == graph_count_.end()) {
  3185. GELOGW("Graph of id: %u has not been added, count cannot be decreased.", graph_id);
  3186. } else {
  3187. --it->second;
  3188. GELOGD("After DecreaseGraphCount, graph count of id[%u] is %u.", graph_id, graph_count_[graph_id]);
  3189. }
  3190. }
  3191. Status GraphManager::GetGraphCount(GraphId graph_id, uint32_t &count) {
  3192. std::lock_guard<std::mutex> lock(graph_count_mutex_);
  3193. auto it = graph_count_.find(graph_id);
  3194. if (it == graph_count_.end()) {
  3195. GELOGW("Graph [id:%u] has not been added.", graph_id);
  3196. return FAILED;
  3197. }
  3198. count = it->second;
  3199. return SUCCESS;
  3200. }
  3201. } // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.