graph_mem_assigner.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/build/memory/graph_mem_assigner.h"
#include <cstring>
#include <set>
#include "common/math/math_util.h"
#include "common/util/error_manager/error_manager.h"
#include "framework/common/debug/ge_log.h"
#include "graph/build/memory/hybrid_mem_assigner.h"
#include "graph/build/memory/var_mem_assign_util.h"
#include "graph/build/memory/block_mem_assigner.h"
#include "graph/common/omg_util.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/ge_attr_value.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/type_utils.h"

namespace {
const int kDataOutputIndex = 0;
const int kAllInputAddrIsAtomic = -1;
const int kVirtualInputNodeMemoryReuse = 0;
const int kVirtualOutputNodeMemoryReuse = 1;
const size_t kVirtualInputNodeOutputSize = 1;
const size_t kVirtualOutputNodeInputSize = 1;
const size_t kVirtualNodeDataIndex = 0;
const char *const kMbatchNodeNameFlag = "_ascend_mbatch_batch_";
}  // namespace

namespace ge {
Status VariableMemoryAssigner::Assign() {
  Status result = ge::VarMemAssignUtil::AssignConstantOpMemory(compute_graph_);
  if (result != ge::SUCCESS) {
    return result;
  }

  result = ge::VarMemAssignUtil::AssignVarMemory(compute_graph_);
  if (result != ge::SUCCESS) {
    return result;
  }
  return ge::SUCCESS;
}

Status VariableMemoryAssigner::AssignVarAttr2Nodes() {
  Status result = ge::VarMemAssignUtil::AssignVarAttr2Nodes(compute_graph_);
  if (result != ge::SUCCESS) {
    return result;
  }
  return ge::SUCCESS;
}
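
// AssignMemory (below) runs in two phases: HybridMemAssigner first lays out reusable
// feature-map memory and yields the running HBM offset recorded in memory_offset_, then
// VariableMemoryAssigner places constants and variables in the per-session variable pool
// tracked by VarManager; the logged delta is the variable memory added by this graph.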

Status GraphMemoryAssigner::AssignMemory() {
  ge::HybridMemAssignerPtr mem_assigner(new (std::nothrow) HybridMemAssigner(compute_graph_));
  if (mem_assigner == nullptr) {
    GELOGE(ge::FAILED, "Alloc HybridMemAssigner failed.");
    return ge::FAILED;
  }
  if (mem_assigner->Assign() != ge::SUCCESS) {
    GELOGE(ge::FAILED, "Memory assigner failed");
    return ge::FAILED;
  }
  MemoryOffset memory_offset(RT_MEMORY_HBM, mem_assigner->GetMemOffset());
  memory_offset_.push_back(memory_offset);

  auto session_id = compute_graph_->GetSessionID();
  int64_t var_size_before_assign = ge::VarManager::Instance(session_id)->GetVarMemSize(RT_MEMORY_HBM);
  auto variable_assigner =
      std::unique_ptr<ge::VariableMemoryAssigner>(new (std::nothrow) ge::VariableMemoryAssigner(compute_graph_));
  if (variable_assigner == nullptr) {
    GELOGE(ge::FAILED, "Alloc VariableMemoryAssigner failed.");
    return ge::FAILED;
  }

  if (variable_assigner->Assign() != ge::SUCCESS) {
    return ge::FAILED;
  }
  int64_t var_size_assign = ge::VarManager::Instance(session_id)->GetVarMemSize(RT_MEMORY_HBM) - var_size_before_assign;
  GELOGI("GraphMemoryAssigner::AssignMemory variable size = %ld", var_size_assign);

  mem_assigner_ = std::move(mem_assigner);
  return ge::SUCCESS;
}

ge::Status GraphMemoryAssigner::AssignVarAttr2Nodes() {
  auto variable_assigner =
      std::unique_ptr<ge::VariableMemoryAssigner>(new (std::nothrow) ge::VariableMemoryAssigner(compute_graph_));
  if (variable_assigner == nullptr) {
    GELOGE(ge::FAILED, "Alloc VariableMemoryAssigner failed.");
    return ge::FAILED;
  }
  if (variable_assigner->AssignVarAttr2Nodes() != ge::SUCCESS) {
    return ge::FAILED;
  }
  return ge::SUCCESS;
}

ge::Status GraphMemoryAssigner::CalculateTensorRealSizeAndOutSize(const ge::ConstGeTensorDescPtr &output_desc,
                                                                  int64_t dim_index, int64_t &output_mem_size,
                                                                  int64_t &batch_dim_num, int64_t &out_size) {
  graphStatus graph_status = ge::TensorUtils::GetSize(*output_desc, out_size);
  if (graph_status != GRAPH_SUCCESS) {
    GELOGE(FAILED, "Opdesc GetSize failed!");
    return FAILED;
  }

  GeShape output_shape = output_desc->GetShape();
  std::vector<int64_t> output_dims = output_shape.GetDims();
  if (dim_index >= static_cast<int64_t>(output_dims.size())) {
    GELOGE(FAILED, "Invalid value(%ld) of attr _reuse_input_on_dim_index, which is out of data range [0, %zu).",
           dim_index, output_dims.size());
    return FAILED;
  }

  for (int64_t index = 0; index < dim_index; index++) {
    FMK_INT64_MULCHECK(batch_dim_num, output_dims[index]);
    batch_dim_num *= output_dims[index];
    output_dims[index] = 1;
  }

  output_shape = GeShape(output_dims);
  Format out_format = output_desc->GetFormat();
  DataType data_type = output_desc->GetDataType();

  graph_status = ge::TensorUtils::CalcTensorMemSize(output_shape, out_format, data_type, output_mem_size);
  if (graph_status != GRAPH_SUCCESS) {
    GELOGE(graph_status, "Opdesc CalcTensorMemSize failed!");
    return FAILED;
  }

  if (output_mem_size < 0) {
    GELOGE(FAILED, "After calculating tensor memory size, output_mem_size = %ld, out of data range [0, %ld]",
           output_mem_size, INT64_MAX);
    return FAILED;
  }

  return SUCCESS;
}
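
// Example of the computation above: for an output shape [8, 3, 224, 224] with
// _reuse_input_on_dim_index = 1, the loop folds dim 0 into batch_dim_num (= 8) and
// rewrites the shape to [1, 3, 224, 224]. output_mem_size is then the memory size of
// that single-batch slice, while out_size remains the size of the complete tensor.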

Status GraphMemoryAssigner::GetMaxBatchLabel(const map<string, vector<NodePtr>> &mem_reuse_virtual_nodes_map,
                                             int32_t mem_reuse_model, string &max_batch_label) {
  for (auto &i_map : mem_reuse_virtual_nodes_map) {
    vector<NodePtr> virtual_nodes_list = i_map.second;
    vector<int64_t> max_shape_dims;
    size_t max_batch_dim = 0;
    bool max_batch_dim_find = false;
    for (size_t i = 0; i < virtual_nodes_list.size(); ++i) {
      GE_CHECK_NOTNULL(virtual_nodes_list[i]);
      OpDescPtr op_desc = virtual_nodes_list[i]->GetOpDesc();
      GE_CHECK_NOTNULL(op_desc);

      ge::ConstGeTensorDescPtr input_output_desc;
      if (mem_reuse_model == kVirtualInputNodeMemoryReuse) {
        input_output_desc = op_desc->GetOutputDescPtr(kVirtualNodeDataIndex);
      } else if (mem_reuse_model == kVirtualOutputNodeMemoryReuse) {
        input_output_desc = op_desc->GetInputDescPtr(kVirtualNodeDataIndex);
      } else {
        GELOGE(FAILED, "Invalid parameter memory reuse model, which is: %d.", mem_reuse_model);
        return FAILED;
      }
      GE_CHECK_NOTNULL(input_output_desc);

      if (i == 0) {
        // All ops must have ATTR_NAME_BATCH_LABEL, no need to check return value.
        (void)ge::AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, max_batch_label);
        max_shape_dims = input_output_desc->GetShape().GetDims();
      } else {
        vector<int64_t> current_shape_dims = input_output_desc->GetShape().GetDims();
        if (current_shape_dims.size() != max_shape_dims.size()) {
          GELOGE(FAILED, "The shape size of several nodes between multiple batches does not match.");
          return FAILED;
        }
        for (size_t j = 0; j < current_shape_dims.size(); ++j) {
          if (current_shape_dims[j] == max_shape_dims[j]) {
            continue;
          }
          if (max_batch_dim_find && max_batch_dim != j) {
            GELOGE(FAILED, "The shape of several nodes between multiple batches does not match.");
            return FAILED;
          }
          max_batch_dim_find = true;
          max_batch_dim = j;
          if (current_shape_dims[j] > max_shape_dims[j]) {
            max_shape_dims[j] = current_shape_dims[j];
            // All ops must have ATTR_NAME_BATCH_LABEL, no need to check return value.
            (void)ge::AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, max_batch_label);
          }
          // Only compare the first different dim in shape.
          break;
        }
      }
    }
    // In every element of virtual_input_nodes_map, the label of the max batch node is the same.
    break;
  }
  return SUCCESS;
}
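
// Illustration of the scan above: with two branches whose data shapes are [4, 224]
// (label "batch_0") and [8, 224] (label "batch_1"), dim 0 is the single varying batch
// dim and "batch_1" comes back as max_batch_label; shapes that differ in more than one
// dim are rejected. Only the first map entry is inspected, because every entry's
// max-batch node carries the same label.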

Status GraphMemoryAssigner::ReAssignMemory(bool is_loop_graph, size_t &mem_offset) {
  if (memory_offset_.empty()) {
    GELOGE(FAILED, "memory_offset_ is empty.");
    return ge::FAILED;
  }

  GE_CHK_STATUS_RET(ReAssignContinuousMemory(is_loop_graph), "ReAssignContinuousMemory Failed!");

  GE_CHK_STATUS_RET(ReAssignReuseAndNoPaddingContinuousInputMemory(),
                    "ReAssignReuseAndNoPaddingContinuousInputMemory Failed!");

  GE_CHK_STATUS_RET(ReAssignReuseAndNoPaddingContinuousOutputMemory(),
                    "ReAssignReuseAndNoPaddingContinuousOutputMemory Failed!");

  GE_CHK_STATUS_RET(ReAssignAtomicMemory(is_loop_graph), "ReAssignAtomicMemory Failed!");

  mem_offset = memory_offset_[0].mem_offset_;

  auto session_id = compute_graph_->GetSessionID();
  if (mem_offset > VarManager::Instance(session_id)->GetGraphMemoryMaxSize()) {
    GELOGE(ge::FAILED, "Current memoffset %zu is greater than memory manager malloc max size %zu", mem_offset,
           VarManager::Instance(session_id)->GetGraphMemoryMaxSize());
    ErrorManager::GetInstance().ATCReportErrMessage("E19022");
    return ge::FAILED;
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::AssignZeroCopyMemory(size_t &mem_offset, size_t &zero_mem_copy_size) {
  BlockMemAssignerPtr priority_assigner = std::move(mem_assigner_->GetPriorityAssinger());
  GE_IF_BOOL_EXEC(priority_assigner == nullptr, GELOGE(FAILED, "Get priority_assigner failed."); return ge::FAILED;);

  size_t mem_offset_tmp = mem_offset;

  // set offset for zero copy block
  for (auto &memory_block : priority_assigner->GetMemoryBlocks()) {
    if (memory_block == nullptr || memory_block->deleted_block_ || !memory_block->is_zero_copy_) {
      continue;
    }
    memory_block->Resize();
    memory_block->SetHeadOffset(mem_offset);
    mem_offset += memory_block->Size();
    memory_block->SetTailOffset(mem_offset - 1);
  }
  GELOGI("mem_offset_ include zero_copy_memory is %zu.", mem_offset);

  // set offset for zero copy nodes
  priority_assigner->SetOpMemOffset(true);
  zero_mem_copy_size = mem_offset - mem_offset_tmp;
  memory_offset_[0].mem_offset_ = mem_offset;

  GELOGI("max_mem_offset:%zu, mem_offset:%zu, zero_mem_copy_size:%zu.", mem_offset, mem_offset_tmp, zero_mem_copy_size);
  return SUCCESS;
}

Status GraphMemoryAssigner::ReAssignContinuousMemory(bool is_loop_graph) {
  GELOGI("Begin to reassign continuous memory");
  Status ret;
  for (auto &node : compute_graph_->GetAllNodes()) {
    // Get the continuous input type of the node, default is false
    bool is_input_continuous = false;
    GE_CHECK_NOTNULL(node->GetOpDesc());
    // If GetBool fail, is_input_continuous is false.
    (void)ge::AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_CONTINUOUS_INPUT, is_input_continuous);

    // Assign continuous input memory
    if (is_input_continuous) {
      int64_t mem_clean_start = 0;
      int64_t mem_clean_size = 0;
      ret = AssignContinuousInputMemory(node, mem_clean_start, mem_clean_size);
      if (ret != ge::SUCCESS) {
        GELOGE(ret, "Assign continuous input memory failed!");
        return ret;
      }

      // Clean up atomic address, eg, hcom node
      vector<int32_t> input_indexes;
      // If GetListInt fail, input_indexes is empty.
      (void)ge::AttrUtils::GetListInt(node->GetOpDesc(), ATOMIC_ATTR_INPUT_INDEX, input_indexes);

      if (!input_indexes.empty() && input_indexes[0] == kAllInputAddrIsAtomic) {
        // check whether there is an atomic conflict between the current node and the peer out node
        if (!CheckInputIsSupportAtomic(node)) {
          GELOGE(ge::FAILED,
                 "There is an atomic conflict between the current node and the peer out node, not supported!");
          return ge::FAILED;
        } else if (is_loop_graph) {
          GE_CHK_STATUS_RET(SetLoopGraphAtomicAttr(node, mem_clean_start));
        } else {
          GE_CHK_STATUS_RET(SetAtomicCleanAttr(nullptr, {mem_clean_start}, {mem_clean_size}),
                            "SetAtomicCleanAttr failed.");
        }
      }
    }

    // Get the reference type of the node, default is false
    bool is_ref = false;
    // If GetBool fail, is_ref is false.
    (void)ge::AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_REFERENCE, is_ref);

    // Get the continuous output type of the node, default is false
    bool is_output_continuous = false;
    // If GetBool fail, is_output_continuous is false.
    (void)ge::AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_CONTINUOUS_OUTPUT, is_output_continuous);

    // If the output is ref type and refers to the ref of an input, the name of the output
    // and the input are the same. Ge encounters ref type, finds matching relationship according
    // to the names of input and output, and allocates the same memory address, eg: HCOMBroadcast
    if (!is_ref && is_output_continuous) {  // Assign continuous output memory
      ret = AssignContinuousOutputMemory(node);
      if (ret != ge::SUCCESS) {
        GELOGE(ret, "Assign reference memory failed!");
        return ret;
      }
    }
  }

  GELOGI("After reassign continuous memory, memoffset = %zu.", memory_offset_[0].mem_offset_);
  return ge::SUCCESS;
}

Status GraphMemoryAssigner::AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start,
                                                        int64_t &continuous_mem_size) {
  GELOGI("Current node %s needs continuous input.", node->GetName().c_str());
  continuous_mem_start = memory_offset_[0].mem_offset_;
  bool continuous_input_alloc = false;
  (void)ge::AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_CONTINUOUS_INPUT_ALLOC, continuous_input_alloc);
  for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
    auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
    GE_IF_BOOL_EXEC(peer_out_data_anchor == nullptr, continue);

    auto peer_op_desc = peer_out_data_anchor->GetOwnerNode()->GetOpDesc();
    GE_IF_BOOL_EXEC(peer_op_desc == nullptr, continue);
    bool is_peer_output_continuous = false;
    // If GetBool fail, is_peer_output_continuous is false.
    (void)ge::AttrUtils::GetBool(peer_op_desc, ATTR_NAME_CONTINUOUS_OUTPUT, is_peer_output_continuous);

    // Get peer node output size, if size == 1(peer node has only one output), continuous input of the node and
    // continuous output of the previous node is the same, we can support it. If size != 1, there may be
    // conflict between the two, we can not support it.
    auto peer_output_size = peer_op_desc->GetOutputsSize();
    GE_IF_BOOL_EXEC(is_peer_output_continuous && (peer_output_size != 1),
                    GELOGE(PARAM_INVALID,
                           "Current node %s requires continuous input, while the previous node %s requires "
                           "continuous output. There may be conflict between the two. This node is not supported now.",
                           node->GetOpDesc()->GetName().c_str(), peer_op_desc->GetName().c_str());
                    return PARAM_INVALID;);

    bool is_peer_reference = false;
    // If GetBool fail, is_peer_reference is false.
    (void)AttrUtils::GetBool(peer_op_desc, ATTR_NAME_REFERENCE, is_peer_reference);
    GE_IF_BOOL_EXEC(is_peer_reference,
                    GELOGE(PARAM_INVALID,
                           "Current node %s requires continuous input, while the previous node %s requires "
                           "reference. There may be conflict between the two. This node is not supported now.",
                           node->GetOpDesc()->GetName().c_str(), peer_op_desc->GetName().c_str());
                    return PARAM_INVALID;);

    vector<int64_t> output_list = peer_op_desc->GetOutputOffset();
    std::vector<int64_t> offsets_for_fusion = {};
    bool has_offset_attr =
        AttrUtils::GetListInt(peer_op_desc, ATTR_NAME_OUTPUT_OFFSET_FOR_BUFFER_FUSION, offsets_for_fusion);
    if (peer_out_data_anchor->GetIdx() < static_cast<int>(output_list.size())) {
      if (continuous_input_alloc && !has_offset_attr) {
        if (in_data_anchor->GetIdx() == 0) {
          continuous_mem_start = output_list.at(peer_out_data_anchor->GetIdx());
        }
        // Can not use "else if" here, in case the node has only one input.
        if (in_data_anchor->GetIdx() == static_cast<int>(node->GetAllInDataAnchors().size()) - 1) {
          int64_t tensor_desc_size = 0;
          Status ret = ge::TensorUtils::GetSize(*(peer_op_desc->GetOutputDescPtr(peer_out_data_anchor->GetIdx())),
                                                tensor_desc_size);
          GE_IF_BOOL_EXEC(ret != ge::SUCCESS, GELOGE(FAILED, "GetSize failed."); return FAILED;);
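          // Round tensor_desc_size up to the next multiple of MEM_ALIGN_SIZE;
          // e.g. with MEM_ALIGN_SIZE = 512, 1000 -> 1024 and 512 stays 512.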
          tensor_desc_size = (tensor_desc_size + MEM_ALIGN_SIZE - 1) / MEM_ALIGN_SIZE * MEM_ALIGN_SIZE;
          continuous_mem_size =
              output_list.at(peer_out_data_anchor->GetIdx()) - continuous_mem_start + tensor_desc_size + MEM_ALIGN_SIZE;
        }
        GELOGI(
            "[IMAS]Check Continuous input : Set %s name[%s] output[%d] offset to [%zu] stream_id[%ld] size[%zu] "
            "real_size[%u].",
            node->GetOwnerComputeGraph()->GetName().c_str(), peer_op_desc->GetName().c_str(),
            peer_out_data_anchor->GetIdx(), output_list.at(peer_out_data_anchor->GetIdx()), peer_op_desc->GetStreamId(),
            0, 0);
        continue;
      }
      output_list.at(peer_out_data_anchor->GetIdx()) = memory_offset_[0].mem_offset_;
    } else {
      GELOGE(FAILED, "index : %d is out of range.", peer_out_data_anchor->GetIdx());
      return FAILED;
    }
    peer_op_desc->SetOutputOffset(output_list);
    size_t pre_mem_offset = memory_offset_[0].mem_offset_;

    int64_t tensor_desc_size = 0;
    if (has_offset_attr) {
      if (peer_out_data_anchor->GetIdx() < static_cast<int>(offsets_for_fusion.size())) {
        auto offset_for_fusion = offsets_for_fusion[peer_out_data_anchor->GetIdx()];
        memory_offset_[0].mem_offset_ += offset_for_fusion;
      } else {
        GELOGE(FAILED, "fusion: peer node %s index : %d is out of range.", peer_op_desc->GetName().c_str(),
               peer_out_data_anchor->GetIdx());
        return FAILED;
      }
    } else {
      Status ret =
          TensorUtils::GetSize(*(peer_op_desc->GetOutputDescPtr(peer_out_data_anchor->GetIdx())), tensor_desc_size);
      GE_IF_BOOL_EXEC(ret != ge::SUCCESS, GELOGE(FAILED, "GetSize failed."); return FAILED;);

      memory_offset_[0].mem_offset_ += tensor_desc_size;
    }

    // If tensor_actual_size is set, memory alignment is not required.
    int32_t is_tensor_actual_size = 0;
    ge::AttrUtils::GetInt(peer_op_desc, ATTR_NAME_GET_TENSOR_ACTUAL_SIZE, is_tensor_actual_size);
    if (is_tensor_actual_size == 0) {
      AlignMemOffset(MEM_ALIGN_SIZE);
    }

    GELOGI(
        "[IMAS]Continuous input : Set %s name[%s] output[%d] offset to [%zu] stream_id[%ld] size[%zu] "
        "real_size[%ld].",
        node->GetOwnerComputeGraph()->GetName().c_str(), peer_op_desc->GetName().c_str(), peer_out_data_anchor->GetIdx(),
        pre_mem_offset, peer_op_desc->GetStreamId(), (memory_offset_[0].mem_offset_ - pre_mem_offset), tensor_desc_size);
  }

  memory_offset_[0].mem_offset_ += MEM_ALIGN_SIZE;
  if (!continuous_input_alloc) {
    continuous_mem_size = memory_offset_[0].mem_offset_ - continuous_mem_start;
  }
  return SUCCESS;
}
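
// Resulting layout (sketch): the peer outputs are packed back to back from
// continuous_mem_start, padded up to MEM_ALIGN_SIZE between tensors, plus one trailing
// MEM_ALIGN_SIZE of slack; continuous_mem_start/continuous_mem_size describe the whole
// packed range so the caller can hand it to the atomic-clean logic in one piece.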

Status GraphMemoryAssigner::AssignContinuousOutputMemory(const ge::NodePtr &node) {
  GELOGI("Current node %s needs continuous output.", node->GetName().c_str());
  auto out_op_desc = node->GetOpDesc();
  GE_IF_BOOL_EXEC(out_op_desc == nullptr, GELOGE(ge::FAILED, "out_op_desc is null."); return ge::FAILED);
  vector<int64_t> output_list = out_op_desc->GetOutputOffset();
  if ((out_op_desc->GetOutputsSize() > output_list.size()) || (output_list.size() == 0)) {
    GELOGE(ge::FAILED, "The size %zu of node output desc is more than output_list's size %zu.",
           out_op_desc->GetOutputsSize(), output_list.size());
    return ge::FAILED;
  }

  size_t mem_offset = output_list[0];
  for (auto &out_data_anchor : node->GetAllOutDataAnchors()) {
    output_list[out_data_anchor->GetIdx()] = mem_offset;
    int64_t tensor_desc_size = 0;
    if (ge::TensorUtils::GetSize(*(out_op_desc->GetOutputDescPtr(out_data_anchor->GetIdx())), tensor_desc_size) !=
        ge::SUCCESS) {
      GELOGE(FAILED, "GetSize failed.");
      return FAILED;
    }
    mem_offset += tensor_desc_size;
    if (mem_offset <= 0) {
      return FAILED;
    }
    mem_offset = (mem_offset + MEM_ALIGN_SIZE - 1) / MEM_ALIGN_SIZE * MEM_ALIGN_SIZE;
    GELOGI(
        "[IMAS]Continuous output : Set %s name[%s] output[%d] offset to [%zu] stream_id[%ld] size[%ld] "
        "real_size[%ld].",
        node->GetOwnerComputeGraph()->GetName().c_str(), out_op_desc->GetName().c_str(), out_data_anchor->GetIdx(),
        output_list[out_data_anchor->GetIdx()], out_op_desc->GetStreamId(), tensor_desc_size, tensor_desc_size);
  }
  out_op_desc->SetOutputOffset(output_list);
  return ge::SUCCESS;
}

Status GraphMemoryAssigner::ReAssignVirtualInputNodeMemory(NodePtr node, size_t &mem_offset_reuse) {
  OpDescPtr op_desc = node->GetOpDesc();
  vector<int64_t> output_list = op_desc->GetOutputOffset();
  if (output_list.empty()) {
    GELOGE(FAILED, "Output offset is empty, node name: %s", node->GetName().c_str());
    return FAILED;
  }
  output_list.at(0) = mem_offset_reuse;
  op_desc->SetOutputOffset(output_list);
  GELOGI("Set virtual input node %s output offset to %zu.", op_desc->GetName().c_str(), mem_offset_reuse);

  int64_t attr_dim_index;
  bool get_attr_dim_flag = ge::AttrUtils::GetInt(op_desc, ATTR_NAME_REUSE_INPUT_ON_DIM_INDEX, attr_dim_index);
  if (!get_attr_dim_flag) {
    GELOGE(FAILED, "Get attr _reuse_input_on_dim_index failed.");
    return FAILED;
  }

  size_t extra_memory_size = 0;
  for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
    auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
    GE_CHECK_NOTNULL(peer_out_data_anchor);
    auto peer_op_desc = peer_out_data_anchor->GetOwnerNode()->GetOpDesc();
    GE_CHECK_NOTNULL(peer_op_desc);
    vector<int64_t> output_offsets = peer_op_desc->GetOutputOffset();
    if (peer_out_data_anchor->GetIdx() >= static_cast<int>(output_offsets.size())) {
      GELOGE(ge::FAILED, "Index : %d is out of range.", peer_out_data_anchor->GetIdx());
      return ge::FAILED;
    }
    output_offsets.at(peer_out_data_anchor->GetIdx()) = mem_offset_reuse;
    peer_op_desc->SetOutputOffset(output_offsets);
    size_t pre_mem_offset = mem_offset_reuse;

    // Calculate tensor real size of each piece of data and out size of complete data
    ge::ConstGeTensorDescPtr output_desc = peer_op_desc->GetOutputDescPtr(peer_out_data_anchor->GetIdx());
    GE_CHECK_NOTNULL(output_desc);
    int64_t output_mem_size;
    int64_t batch_dim_num = 1;
    int64_t out_size;
    if (CalculateTensorRealSizeAndOutSize(output_desc, attr_dim_index, output_mem_size, batch_dim_num, out_size) !=
        SUCCESS) {
      GELOGE(FAILED, "CalculateTensorRealSizeAndOutSize failed for node %s output [%d].",
             peer_op_desc->GetName().c_str(), peer_out_data_anchor->GetIdx());
      return FAILED;
    }

    mem_offset_reuse += output_mem_size;
    extra_memory_size = extra_memory_size + out_size - output_mem_size;

    GELOGI(
        "[IMAS]Virtual node optimize: set %s name[%s] output[%d] offset to [%zu] stream_id[%ld] size[%ld] "
        "real_size[%ld].",
        node->GetOwnerComputeGraph()->GetName().c_str(), peer_op_desc->GetName().c_str(), peer_out_data_anchor->GetIdx(),
        pre_mem_offset, peer_op_desc->GetStreamId(), out_size, output_mem_size);
  }

  mem_offset_reuse += extra_memory_size;
  size_t after_mem_offset = mem_offset_reuse;
  GELOGI("After reassign virtual input node[name: %s, type: %s] memory, memory offset = %zu.",
         op_desc->GetName().c_str(), op_desc->GetType().c_str(), after_mem_offset);
  return SUCCESS;
}
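
// Note on sizing above: mem_offset_reuse advances by each input's real (single-batch)
// size while tensors are placed, and the accumulated out_size/real-size gap is appended
// once at the end as extra_memory_size, so the reused region still covers every input's
// full allocation.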

Status GraphMemoryAssigner::ReAssignReuseAndNoPaddingContinuousInputMemory() {
  map<string, vector<NodePtr>> mem_reuse_virtual_input_nodes_map;
  for (const auto &n : compute_graph_->GetAllNodes()) {
    OpDescPtr op_desc = n->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    bool attr_continuous = false;
    bool get_continuous_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOPADDING_CONTINUOUS_INPUT, attr_continuous);
    GE_IF_BOOL_EXEC(!get_continuous_flag, continue);
    bool attr_reuse = false;
    bool get_reuse_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_OUTPUT_REUSE_INPUT, attr_reuse);
    GE_IF_BOOL_EXEC(!get_reuse_flag, continue);

    if (attr_reuse && attr_continuous) {
      if (op_desc->GetOutputsSize() != kVirtualInputNodeOutputSize) {
        // When current virtual node has several outputs, can't directly determine which input is the tensor for reuse.
        GELOGE(FAILED, "Only one output is supported, current virtual node %s has %zu outputs.", n->GetName().c_str(),
               op_desc->GetOutputsSize());
        return FAILED;
      }
      GELOGD("Start to reassign memory for virtual input node, memory offset = %zu.", memory_offset_[0].mem_offset_);
      string batch_label_string;
      // Not all ops have ATTR_NAME_BATCH_LABEL, no need to check return value, only check out parameter
      (void)ge::AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, batch_label_string);
      if (batch_label_string.empty()) {
        size_t node_mem_offset = memory_offset_[0].mem_offset_;
        // No ATTR_NAME_BATCH_LABEL, no need to reuse memory.
        Status status = ReAssignVirtualInputNodeMemory(n, node_mem_offset);
        if (status != SUCCESS) {
          GELOGE(FAILED, "Reassign memory of virtual input node failed, node name: %s.", n->GetName().c_str());
          return FAILED;
        }

        memory_offset_[0].mem_offset_ = node_mem_offset;
        AlignMemOffset(MEM_ALIGN_SIZE);
        GELOGD("After reassign memory for virtual input node, align memory = %zu.", memory_offset_[0].mem_offset_);
      } else {
        // Has ATTR_NAME_BATCH_LABEL, for dynamic multi-batch node, need to reuse memory.
        string current_node_full_name = op_desc->GetName();
        size_t pos = current_node_full_name.find(kMbatchNodeNameFlag);
        if (pos == string::npos) {
          GELOGE(FAILED, "Cannot find key string [%s] of multi-batch in name of virtual input node, node name: %s.",
                 kMbatchNodeNameFlag, n->GetName().c_str());
          return FAILED;
        }
        string fixed_name = current_node_full_name.substr(0, pos);
        vector<NodePtr> parallel_virtual_input_nodes;
        if (mem_reuse_virtual_input_nodes_map.count(fixed_name) != 0) {
          parallel_virtual_input_nodes = mem_reuse_virtual_input_nodes_map[fixed_name];
        }
        parallel_virtual_input_nodes.emplace_back(n);
        mem_reuse_virtual_input_nodes_map[fixed_name] = parallel_virtual_input_nodes;
      }
    }
  }
  int32_t mem_reuse_model = 0;
  if (ReAssignVirtualNodesMemory(mem_reuse_virtual_input_nodes_map, mem_reuse_model) != SUCCESS) {
    GELOGE(FAILED, "Reassign memory of virtual input nodes failed.");
    return FAILED;
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::ReAssignVirtualOutputNodeMemory(NodePtr node, size_t &mem_offset_reuse) {
  OpDescPtr op_desc = node->GetOpDesc();

  // 1. set memory of to be reused input tensor
  auto in_data_anchor_list = node->GetAllInDataAnchors();
  auto peer_out_data_anchor = in_data_anchor_list.at(0)->GetPeerOutAnchor();
  GE_CHECK_NOTNULL(peer_out_data_anchor);
  auto peer_op_desc = peer_out_data_anchor->GetOwnerNode()->GetOpDesc();
  GE_CHECK_NOTNULL(peer_op_desc);
  vector<int64_t> in_node_output_offsets = peer_op_desc->GetOutputOffset();
  if (peer_out_data_anchor->GetIdx() >= static_cast<int>(in_node_output_offsets.size())) {
    GELOGE(FAILED, "Index : %d is out of range.", peer_out_data_anchor->GetIdx());
    return FAILED;
  }
  in_node_output_offsets.at(peer_out_data_anchor->GetIdx()) = mem_offset_reuse;
  peer_op_desc->SetOutputOffset(in_node_output_offsets);
  GELOGI("Set virtual output node %s input data offset to %zu.", op_desc->GetName().c_str(), mem_offset_reuse);

  // 2. set memory of output tensor
  vector<int64_t> output_list = op_desc->GetOutputOffset();
  if (output_list.empty()) {
    GELOGE(FAILED, "Output offset is empty, node name: %s", node->GetName().c_str());
    return FAILED;
  }
  if (op_desc->GetOutputsSize() > output_list.size()) {
    GELOGE(FAILED, "The size %zu of op_desc is more than output_list's size %zu.", op_desc->GetOutputsSize(),
           output_list.size());
    return FAILED;
  }

  int64_t attr_dim_index;
  bool get_attr_dim_flag = ge::AttrUtils::GetInt(op_desc, ATTR_NAME_REUSE_INPUT_ON_DIM_INDEX, attr_dim_index);
  if (!get_attr_dim_flag) {
    GELOGE(FAILED, "Get attr _reuse_input_on_dim_index failed.");
    return FAILED;
  }

  size_t extra_memory_size = 0;
  for (auto &out_data_anchor : node->GetAllOutDataAnchors()) {
    output_list[out_data_anchor->GetIdx()] = mem_offset_reuse;
    size_t pre_mem_offset = mem_offset_reuse;

    // calculate tensor real size of each piece of data and out size of complete data
    ge::ConstGeTensorDescPtr output_desc = op_desc->GetOutputDescPtr(out_data_anchor->GetIdx());
    GE_CHECK_NOTNULL(output_desc);
    int64_t output_mem_size;
    int64_t batch_dim_num = 1;
    int64_t out_size;
    if (CalculateTensorRealSizeAndOutSize(output_desc, attr_dim_index, output_mem_size, batch_dim_num, out_size) !=
        SUCCESS) {
      GELOGE(FAILED, "CalculateTensorRealSizeAndOutSize failed for node %s output [%d].", op_desc->GetName().c_str(),
             out_data_anchor->GetIdx());
      return FAILED;
    }

    mem_offset_reuse += output_mem_size;
    extra_memory_size = extra_memory_size + out_size - output_mem_size;

    GELOGI("[IMAS]Virtual node optimize: set %s name[%s] output[%d] offset to [%zu], size[%ld], real_size[%ld].",
           node->GetOwnerComputeGraph()->GetName().c_str(), op_desc->GetName().c_str(), out_data_anchor->GetIdx(),
           pre_mem_offset, out_size, output_mem_size);
  }
  op_desc->SetOutputOffset(output_list);
  mem_offset_reuse += extra_memory_size;
  size_t after_mem_offset = mem_offset_reuse;
  GELOGI("After reassign virtual output node[name: %s, type: %s] memory, memory offset = %zu.",
         op_desc->GetName().c_str(), op_desc->GetType().c_str(), after_mem_offset);
  return SUCCESS;
}

Status GraphMemoryAssigner::ReAssignReuseAndNoPaddingContinuousOutputMemory() {
  map<string, vector<NodePtr>> mem_reuse_virtual_output_nodes_map;
  for (const auto &n : compute_graph_->GetAllNodes()) {
    OpDescPtr op_desc = n->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    bool attr_continuous = false;
    bool get_continuous_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_NOPADDING_CONTINUOUS_OUTPUT, attr_continuous);
    GE_IF_BOOL_EXEC(!get_continuous_flag, continue);
    bool attr_reuse = false;
    bool get_reuse_flag = ge::AttrUtils::GetBool(op_desc, ATTR_NAME_OUTPUT_REUSE_INPUT, attr_reuse);
    GE_IF_BOOL_EXEC(!get_reuse_flag, continue);

    if (attr_reuse && attr_continuous) {
      auto in_data_anchor_list = n->GetAllInDataAnchors();
      if (in_data_anchor_list.size() != kVirtualOutputNodeInputSize) {
        // When current virtual node has several inputs, can't directly determine which input is the tensor for reuse.
        GELOGE(FAILED, "Only one input is supported, current virtual node %s has %zu inputs.", n->GetName().c_str(),
               in_data_anchor_list.size());
        return FAILED;
      }
      GELOGD("Start to reassign memory for virtual output node, memory offset = %zu.", memory_offset_[0].mem_offset_);
      string batch_label_string;
      // Not all ops have ATTR_NAME_BATCH_LABEL, no need to check return value, only check out parameter
      (void)ge::AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, batch_label_string);
      if (batch_label_string.empty()) {
        size_t node_mem_offset = memory_offset_[0].mem_offset_;
        // No ATTR_NAME_BATCH_LABEL, no need to reuse memory.
        Status status = ReAssignVirtualOutputNodeMemory(n, node_mem_offset);
        if (status != SUCCESS) {
          GELOGE(FAILED, "Reassign memory of virtual output node failed, node name: %s.", n->GetName().c_str());
          return FAILED;
        }
        memory_offset_[0].mem_offset_ = node_mem_offset;
        AlignMemOffset(MEM_ALIGN_SIZE);
        GELOGD("After reassign memory for virtual output node, align memory = %zu.", memory_offset_[0].mem_offset_);
      } else {
        // Has ATTR_NAME_BATCH_LABEL, for dynamic multi-batch node, need to reuse memory.
        string current_node_full_name = op_desc->GetName();
        size_t pos = current_node_full_name.find(kMbatchNodeNameFlag);
        if (pos == string::npos) {
          GELOGE(FAILED, "Cannot find key string [%s] of multi-batch in name of virtual output node, node name: %s.",
                 kMbatchNodeNameFlag, n->GetName().c_str());
          return FAILED;
        }
        string fixed_name = current_node_full_name.substr(0, pos);
        vector<NodePtr> parallel_virtual_output_nodes;
        if (mem_reuse_virtual_output_nodes_map.count(fixed_name) != 0) {
          parallel_virtual_output_nodes = mem_reuse_virtual_output_nodes_map[fixed_name];
        }
        parallel_virtual_output_nodes.emplace_back(n);
        mem_reuse_virtual_output_nodes_map[fixed_name] = parallel_virtual_output_nodes;
      }
    }
  }
  int32_t mem_reuse_model = 1;
  if (ReAssignVirtualNodesMemory(mem_reuse_virtual_output_nodes_map, mem_reuse_model) != SUCCESS) {
    GELOGE(FAILED, "Reassign memory of virtual output nodes failed.");
    return FAILED;
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::ReAssignVirtualNodesMemory(map<string, vector<NodePtr>> &mem_reuse_nodes_map,
                                                       int32_t mem_reuse_model) {
  // Find max batch label value
  string max_batch_label;
  if (GetMaxBatchLabel(mem_reuse_nodes_map, mem_reuse_model, max_batch_label) != SUCCESS) {
    GELOGE(FAILED, "Get max batch label failed.");
    return FAILED;
  }
  GELOGI("The batch label of max batch virtual nodes is %s.", max_batch_label.c_str());

  // Assign memory of max batch nodes that have the same batch label.
  GELOGD("Start to reassign memory for max batch virtual nodes, memory offset = %zu.", memory_offset_[0].mem_offset_);
  vector<size_t> nodes_mem_offset_list;
  for (auto &i_map : mem_reuse_nodes_map) {
    size_t max_batch_node_mem_offset = memory_offset_[0].mem_offset_;
    nodes_mem_offset_list.emplace_back(max_batch_node_mem_offset);

    vector<NodePtr> virtual_nodes_list = i_map.second;
    for (auto &i_node : virtual_nodes_list) {
      // Op_desc is not nullptr, it has been checked.
      OpDescPtr op_desc = i_node->GetOpDesc();
      string batch_label_string;
      // All ops must have ATTR_NAME_BATCH_LABEL, no need to check return value.
      (void)ge::AttrUtils::GetStr(op_desc, ATTR_NAME_BATCH_LABEL, batch_label_string);
      if (batch_label_string == max_batch_label) {
        Status status = SUCCESS;
        if (mem_reuse_model == kVirtualInputNodeMemoryReuse) {
          status = ReAssignVirtualInputNodeMemory(i_node, max_batch_node_mem_offset);
        } else if (mem_reuse_model == kVirtualOutputNodeMemoryReuse) {
          status = ReAssignVirtualOutputNodeMemory(i_node, max_batch_node_mem_offset);
        } else {
          GELOGE(FAILED, "Invalid parameter memory reuse model, which is: %d.", mem_reuse_model);
          return FAILED;
        }
        if (status != SUCCESS) {
          GELOGE(FAILED, "Reassign memory of virtual node failed, node name: %s.", i_node->GetName().c_str());
          return FAILED;
        }

        memory_offset_[0].mem_offset_ = max_batch_node_mem_offset;
        AlignMemOffset(MEM_ALIGN_SIZE);
        GELOGD("After reassign memory for virtual node, align memory = %zu.", memory_offset_[0].mem_offset_);
        // Only assign memory of max batch nodes.
        break;
      }
    }
  }

  // Assign memory of remaining nodes that have the same fixed_name.
  GELOGD("Start to reassign memory for remaining batch virtual nodes, memory offset = %zu.",
         memory_offset_[0].mem_offset_);
  size_t memory_reuse_index = 0;
  for (auto &i_map : mem_reuse_nodes_map) {
    vector<NodePtr> virtual_nodes_list = i_map.second;
    for (auto &i_node : virtual_nodes_list) {
      size_t remaining_batch_node_mem_offset = nodes_mem_offset_list[memory_reuse_index];
      Status status = SUCCESS;
      if (mem_reuse_model == kVirtualInputNodeMemoryReuse) {
        status = ReAssignVirtualInputNodeMemory(i_node, remaining_batch_node_mem_offset);
      } else if (mem_reuse_model == kVirtualOutputNodeMemoryReuse) {
        status = ReAssignVirtualOutputNodeMemory(i_node, remaining_batch_node_mem_offset);
      } else {
        GELOGE(FAILED, "Invalid parameter memory reuse model, which is: %d.", mem_reuse_model);
        return FAILED;
      }
      if (status != SUCCESS) {
        GELOGE(FAILED, "Reassign memory of virtual node failed, node name: %s.", i_node->GetName().c_str());
        return FAILED;
      }
    }
    memory_reuse_index++;
  }
  return SUCCESS;
}
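
// Reuse scheme, in short: the first pass walks each fixed-name group, assigns fresh
// memory only to the branch carrying max_batch_label, and records the group's start
// offset; the second pass replays every branch of the group from that same recorded
// offset, so all batch branches of one multi-batch node share a single buffer sized
// for the largest batch.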

Status GraphMemoryAssigner::ReAssignAtomicMemory(bool is_loop_graph) {
  GE_CHECK_NOTNULL(compute_graph_);
  // Atomic op memory start addr
  int64_t atomic_mem_start = static_cast<int64_t>(memory_offset_[0].mem_offset_);
  GELOGI("Begin to reAssign atomic memory, atomic initial address mem_offset = %zu!", memory_offset_[0].mem_offset_);

  vector<NodePtr> connect_netoutput_nodes;
  for (auto &node : compute_graph_->GetAllNodes()) {
    auto node_op_desc = node->GetOpDesc();
    if (node_op_desc == nullptr) {
      continue;
    }

    bool is_atomic = false;
    // If GetBool fail, is_atomic is false.
    (void)ge::AttrUtils::GetBool(node_op_desc, ATOMIC_ATTR_IS_ATOMIC_NODE, is_atomic);
    if (!is_atomic) {
      continue;
    }

    bool is_ref = false;
    // If GetBool fail, is_ref is false.
    (void)ge::AttrUtils::GetBool(node_op_desc, ATTR_NAME_REFERENCE, is_ref);
    if (is_ref) {
      GELOGE(ge::PARAM_INVALID, "The node %s cannot have both atomic and ref attribute.",
             node_op_desc->GetName().c_str());
      return ge::PARAM_INVALID;
    }

    vector<int> is_connect_netoutput;
    // If GetBool fail, attr is_connect_netoutput is an empty vector.
    (void)ge::AttrUtils::GetListInt(node_op_desc, ATTR_NAME_NODE_CONNECT_OUTPUT, is_connect_netoutput);
    if (!is_connect_netoutput.empty()) {
      connect_netoutput_nodes.emplace_back(node);
      continue;
    }

    // Atomic op memory start addr of loop graph
    int64_t loop_graph_atomic_mem_start = static_cast<int64_t>(memory_offset_[0].mem_offset_);
    vector<int64_t> mem_offset_end;
    if (AssignAtomicOutputAndWorkspaceMemory(node, mem_offset_end) != SUCCESS) {
      GELOGE(FAILED, "Assign atomic output and workspace memory failed, node is %s.", node->GetName().c_str());
      return FAILED;
    }

    /// In networks with loop op, atomic op uses atomic_addr_clean op independently,
    /// so we need to set the attr separately.
    if (is_loop_graph) {
      GE_CHK_STATUS_RET(SetLoopGraphAtomicAttr(node, loop_graph_atomic_mem_start));
    }
  }

  // In networks without loop op, the same atomic addr clean op is used for atomic op
  if (!is_loop_graph) {
    // Set the address attr of atomic clean operator
    int64_t atomic_mem_size = memory_offset_[0].mem_offset_ - atomic_mem_start;
    if (atomic_mem_size != 0) {
      GE_CHK_STATUS_RET(SetAtomicCleanAttr(nullptr, {atomic_mem_start}, {atomic_mem_size}),
                        "SetAtomicCleanAttr failed.");
    }
  }

  if (AssignConnectNetOutputAtomicMemory(connect_netoutput_nodes) != SUCCESS) {
    GELOGE(FAILED, "Failed to assign memory of nodes that connect to netoutput.");
    return FAILED;
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::AssignAtomicOutputAndWorkspaceMemory(const ge::NodePtr &node,
                                                                 vector<int64_t> &mem_offset_end) {
  auto node_op_desc = node->GetOpDesc();
  // Assign atomic node output memory
  Status ret = AssignAtomicOutputMemory(node, mem_offset_end);
  if (ret != SUCCESS) {
    GELOGE(ret, "Failed to assign atomic output memory, node is %s.", node_op_desc->GetName().c_str());
    return ret;
  }

  // Check and assign atomic node workspace memory
  map<string, map<int64_t, int64_t>> atomic_workspace_info;
  atomic_workspace_info = node_op_desc->TryGetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_INFO, atomic_workspace_info);
  if (!atomic_workspace_info.empty()) {
    bool is_fusion_node = false;
    // If GetBool fail, is_fusion_node is false.
    (void)ge::AttrUtils::GetBool(node_op_desc, ATOMIC_ATTR_IS_FUSION_NODE, is_fusion_node);

    if (is_fusion_node) {
      // Assign fusion atomic node workspace memory
      ret = AssignFusionAtomicWorkspaceMemory(node_op_desc, atomic_workspace_info, mem_offset_end);
    } else {
      // Assign single ordinary atomic node workspace memory, not include fusion node
      ret = AssignOrdinaryAtomicWorkspaceMemory(node_op_desc, atomic_workspace_info, mem_offset_end);
    }
    if (ret != SUCCESS) {
      GELOGE(ret, "Assign atomic workspace memory failed, node is %s.", node_op_desc->GetName().c_str());
      return ret;
    }
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::AssignConnectNetOutputAtomicMemory(vector<NodePtr> &connect_netoutput_nodes) {
  for (auto &node : connect_netoutput_nodes) {
    GE_CHECK_NOTNULL(node);
    if (node->GetOpDesc() == nullptr) {
      GELOGW("Current node %s op desc is nullptr, memory assignment is skipped.", node->GetName().c_str());
      continue;
    }

    // Atomic memory start addr
    int64_t original_atomic_mem_start = static_cast<int64_t>(memory_offset_[0].mem_offset_);
    GELOGD("Start to assign memory of atomic node, node name: %s, node type: %s, mem_offset: %ld.",
           node->GetName().c_str(), node->GetOpDesc()->GetType().c_str(), original_atomic_mem_start);
    vector<int64_t> mem_offset_end;
    if (AssignAtomicOutputAndWorkspaceMemory(node, mem_offset_end) != SUCCESS) {
      GELOGE(FAILED, "Assign atomic output and workspace memory failed, node is %s.", node->GetName().c_str());
      return FAILED;
    }

    // All atomic nodes use atomic_addr_clean op independently, so we need to set the attr separately.
    if (SetIndependentAtomicAttr(node, original_atomic_mem_start, mem_offset_end) != SUCCESS) {
      GELOGE(FAILED, "Failed to set atomic attr separately.");
      return FAILED;
    }
  }
  return SUCCESS;
}

Status GraphMemoryAssigner::AssignReferenceMemory() {
  for (auto &node : compute_graph_->GetDirectNode()) {
    // Get the reference type of the node, default is false
    bool is_ref = false;
    // If GetBool fail, is_ref is false.
    (void)ge::AttrUtils::GetBool(node->GetOpDesc(), ATTR_NAME_REFERENCE, is_ref);
    if (!is_ref) {
      continue;
    }

    GELOGI("Current node %s needs to support the reference relationship between output and input.",
           node->GetName().c_str());

    auto out_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(out_op_desc == nullptr, GELOGE(ge::FAILED, "out_op_desc is null."); return ge::FAILED);
    vector<int64_t> output_list = out_op_desc->GetOutputOffset();

    if (out_op_desc->GetOutputsSize() > output_list.size()) {
      GELOGE(ge::FAILED, "The size %zu of node output desc is more than output_list's size %zu.",
             out_op_desc->GetOutputsSize(), output_list.size());
      return ge::FAILED;
    }

    map<string, int> input_name_index;
    for (const auto &input_name : out_op_desc->GetAllInputNames()) {
      int index = out_op_desc->GetInputIndexByName(input_name);
      input_name_index.emplace(input_name, index);
    }

    for (auto &out_data_anchor : node->GetAllOutDataAnchors()) {
      string out_data_anchor_name = out_op_desc->GetOutputNameByIndex(out_data_anchor->GetIdx());
      auto iter = input_name_index.find(out_data_anchor_name);
      if (iter != input_name_index.end()) {
        int index = iter->second;
        GELOGI("Reference memory: input anchor index = %d, input anchor name = %s, output anchor name = %s.", index,
               iter->first.c_str(), out_data_anchor_name.c_str());
        GE_CHECK_NOTNULL(node->GetInDataAnchor(index));
        auto peer_out_anchor = node->GetInDataAnchor(index)->GetPeerOutAnchor();
        GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
        int peer_out_anchor_index = peer_out_anchor->GetIdx();
        auto peer_out_node = peer_out_anchor->GetOwnerNode();
        auto peer_out_op_desc = peer_out_node->GetOpDesc();
        GE_CHECK_NOTNULL(peer_out_op_desc);
        output_list[out_data_anchor->GetIdx()] = peer_out_op_desc->GetOutputOffset()[peer_out_anchor_index];
        GELOGI("Reference output : Set %s name[%s] output[%d] offset to [%ld] stream_id[%ld]",
               node->GetOwnerComputeGraph()->GetName().c_str(), peer_out_op_desc->GetName().c_str(),
               out_data_anchor->GetIdx(), output_list[out_data_anchor->GetIdx()], peer_out_op_desc->GetStreamId());
      } else {
        GELOGI("Reference output : origin %s name[%s] output[%d] offset is [%ld] stream_id[%ld]",
               node->GetOwnerComputeGraph()->GetName().c_str(), out_op_desc->GetName().c_str(),
               out_data_anchor->GetIdx(), output_list[out_data_anchor->GetIdx()], out_op_desc->GetStreamId());
      }
    }

    out_op_desc->SetOutputOffset(output_list);
  }
  return ge::SUCCESS;
}

bool GraphMemoryAssigner::CheckInputIsSupportAtomic(const ge::NodePtr &node) {
  for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
    auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
    if (peer_out_data_anchor == nullptr) {
      continue;
    }
    auto peer_op_desc = peer_out_data_anchor->GetOwnerNode()->GetOpDesc();
    if (peer_op_desc == nullptr) {
      continue;
    }
    if ((peer_op_desc->GetType() == CONSTANTOP) || (peer_op_desc->GetType() == AIPP_DATA_TYPE) ||
        (peer_op_desc->GetType() == VARIABLE)) {
      GELOGE(ge::FAILED,
             "The current node is %s, and the peer out node is %s. Currently, this scenario is not supported",
             node->GetName().c_str(), peer_op_desc->GetName().c_str());
      return false;
    }
  }
  return true;
}
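
// Presumably this restriction exists because atomic inputs are zeroed by an
// atomic-addr-clean op before the node executes; a peer Constant/AIPP-data/Variable
// lives in persistent memory, and cleaning it would clobber data that must survive
// across runs.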

Status GraphMemoryAssigner::AssignAtomicOutputMemory(const ge::NodePtr &node, vector<int64_t> &mem_offset_end) {
  auto op_desc = node->GetOpDesc();
  GE_IF_BOOL_EXEC(op_desc == nullptr, GELOGE(ge::FAILED, "op_desc is null."); return ge::FAILED);
  mem_offset_end.clear();
  GELOGD("Begin to assign atomic output memory, node = %s.", op_desc->GetName().c_str());

  vector<int64_t> atomic_output_index;
  // If GetListInt fail, atomic_output_index is empty.
  (void)ge::AttrUtils::GetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_index);

  // Check atomic output
  vector<int64_t> output_list = op_desc->GetOutputOffset();
  if (atomic_output_index.size() > output_list.size()) {
    GELOGE(ge::FAILED, "The size of atomic_output_index is more than the size of output_list");
    return ge::FAILED;
  }
  auto output_list_size = static_cast<int64_t>(output_list.size());
  for (auto &output_index : atomic_output_index) {
    if (output_index >= output_list_size) {
      GELOGE(ge::PARAM_INVALID, "The output index %ld is more than the size %ld of output_list.", output_index,
             output_list_size);
      return ge::PARAM_INVALID;
    }

    // If the input of the cascade op needs to clear the atomic addr, there is no need to clear it separately here
    bool is_assigned_mem = false;
    if (GetMemoryAssignmentStatus(node, output_index, is_assigned_mem) != SUCCESS) {
      GELOGE(ge::FAILED, "Failed to get memory assignment of node %s.", node->GetName().c_str());
      return ge::FAILED;
    }

    // If you have already assigned an atomic address, skip it, and you don't need to reassign it.
    if (is_assigned_mem) {
      GELOGI(
          "Node %s atomic output : we have assigned atomic memory as the input of next node in "
          "ReAssignContinuousMemory function.",
          op_desc->GetName().c_str());
      continue;
    }

    auto output_desc = op_desc->GetAllOutputsDescPtr().at(output_index);
    int64_t size = 0;
    if (ge::TensorUtils::GetSize(*output_desc, size) != SUCCESS) {
      GELOGI("Get size failed");
    }

    output_list[output_index] = memory_offset_[0].mem_offset_;
    GELOGI("[IMAS]Atomic output : Set %s name[%s] output[%ld] offset to [%zu] stream_id[%ld] size[%ld] real_size[%ld].",
           compute_graph_->GetName().c_str(), op_desc->GetName().c_str(), output_index, memory_offset_[0].mem_offset_,
           op_desc->GetStreamId(), size, size);

    memory_offset_[0].mem_offset_ += size;
    AlignMemOffset(MEM_ALIGN_SIZE);
    mem_offset_end.emplace_back(memory_offset_[0].mem_offset_);
  }

  op_desc->SetOutputOffset(output_list);
  return ge::SUCCESS;
}
Status GraphMemoryAssigner::GetMemoryAssignmentStatus(const ge::NodePtr &node, int64_t output_index,
                                                      bool &is_mem_assigned) {
  if (static_cast<size_t>(output_index) >= node->GetAllOutDataAnchors().size()) {
    GELOGE(ge::PARAM_INVALID, "Output index %ld is more than the size of node's AllOutDataAnchors.", output_index);
    return ge::PARAM_INVALID;
  }
  auto out_data_anchor = node->GetAllOutDataAnchors().at(output_index);
  GE_CHECK_NOTNULL(out_data_anchor);
  auto input_anchors = out_data_anchor->GetPeerInDataAnchors();
  for (auto &input_anchor : input_anchors) {
    auto output_node = input_anchor->GetOwnerNode();

    /// Get the atomic input attr of the peer consumer op. If atomic_input_index[0] == -1, the atomic address
    /// has already been assigned.
    vector<int64_t> atomic_input_index;
    (void)ge::AttrUtils::GetListInt(output_node->GetOpDesc(), ATOMIC_ATTR_INPUT_INDEX, atomic_input_index);
    if (!atomic_input_index.empty() && (atomic_input_index[0] == kAllInputAddrIsAtomic)) {
      is_mem_assigned = true;
      break;
    }
  }
  return SUCCESS;
}
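// Editor's note: per the comment above, kAllInputAddrIsAtomic is the -1 sentinel stored at
// ATOMIC_ATTR_INPUT_INDEX[0] of a consumer. For example, a downstream node carrying {-1} declares that
// every one of its input addresses is atomic and was already handled upstream, so the producer's output
// checked here is reported as is_mem_assigned = true and AssignAtomicOutputMemory skips it.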
Status GraphMemoryAssigner::AssignOrdinaryAtomicWorkspaceMemory(const ge::OpDescPtr &op_desc,
                                                                map<string, map<int64_t, int64_t>> &workspace_info,
                                                                vector<int64_t> &mem_offset_end) {
  GELOGI("Begin to reassign normal atomic memory, node = %s.", op_desc->GetName().c_str());
  vector<int64_t> workspace_vector = op_desc->GetWorkspace();

  for (auto iter = workspace_info.begin(); iter != workspace_info.end(); ++iter) {
    if (op_desc->GetName() != iter->first) {
      GELOGE(ge::PARAM_INVALID, "The node name %s and the node name %s in workspace info are inconsistent.",
             op_desc->GetName().c_str(), iter->first.c_str());
      return ge::PARAM_INVALID;
    }

    if (iter->second.empty()) {
      continue;
    }

    for (auto &info_iter : iter->second) {
      auto workspace_index = static_cast<uint64_t>(info_iter.first);
      auto workspace_size = info_iter.second;
      if (workspace_index >= workspace_vector.size()) {
        GELOGE(ge::PARAM_INVALID, "The workspace index %lu is more than the size %zu of workspace vector.",
               workspace_index, workspace_vector.size());
        return ge::PARAM_INVALID;
      }

      workspace_vector[workspace_index] = memory_offset_[0].mem_offset_;
      GELOGI(
          "[IMAS]Atomic ordinary workspace : Set %s name[%s] workspace[%lu] offset to [%zu] stream_id[%ld] "
          "size[%ld] real_size[%ld].",
          compute_graph_->GetName().c_str(), op_desc->GetName().c_str(), workspace_index,
          memory_offset_[0].mem_offset_, op_desc->GetStreamId(), workspace_size, workspace_size);

      memory_offset_[0].mem_offset_ += workspace_size;
      mem_offset_end.emplace_back(memory_offset_[0].mem_offset_);
    }
  }
  op_desc->SetWorkspace(workspace_vector);
  return SUCCESS;
}
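// Illustrative sketch (editor's addition; the node name and sizes are hypothetical): in the ordinary case,
// workspace_info is keyed by this node's own name, e.g. { "Conv2D_1": { 0: 1024, 2: 512 } } assigns
// workspace slots 0 and 2 of Conv2D_1 to consecutive regions starting at the current offset `base`:
//   workspace_vector[0] = base;          mem_offset_ += 1024;
//   workspace_vector[2] = base + 1024;   mem_offset_ += 512;
// and mem_offset_end records {base + 1024, base + 1536} as section ends for the atomic-clean attr.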
Status GraphMemoryAssigner::AssignFusionAtomicWorkspaceMemory(const ge::OpDescPtr &op_desc,
                                                              map<string, map<int64_t, int64_t>> &workspace_info,
                                                              vector<int64_t> &mem_offset_end) {
  GELOGI("Begin to reassign fusion atomic memory, node = %s.", op_desc->GetName().c_str());
  map<string, map<int64_t, int64_t>> sub_node_workspace_offset;

  for (auto &iter : workspace_info) {
    if (iter.second.empty()) {
      continue;
    }

    map<int64_t, int64_t> index_offset;
    for (auto &info_iter : iter.second) {
      auto workspace_index = static_cast<uint64_t>(info_iter.first);
      auto workspace_size = info_iter.second;

      size_t workspace_offset = memory_offset_[0].mem_offset_;
      GELOGI(
          "[IMAS]Atomic fusion workspace : Set %s name[%s] workspace[%lu] offset to [%zu] stream_id[%ld] size[%ld] "
          "real_size[%ld].",
          compute_graph_->GetName().c_str(), op_desc->GetName().c_str(), workspace_index,
          memory_offset_[0].mem_offset_, op_desc->GetStreamId(), workspace_size, workspace_size);

      memory_offset_[0].mem_offset_ += workspace_size;
      mem_offset_end.emplace_back(memory_offset_[0].mem_offset_);
      index_offset.insert(std::make_pair(workspace_index, workspace_offset));
    }
    sub_node_workspace_offset.insert(std::make_pair(iter.first, index_offset));
  }

  if (!(op_desc->SetExtAttr(EXT_ATTR_ATOMIC_WORKSPACE_OFFSET, sub_node_workspace_offset))) {
    GELOGE(FAILED, "Set EXT_ATTR_ATOMIC_WORKSPACE_OFFSET failed, op name:%s.", op_desc->GetName().c_str());
    return FAILED;
  }
  return SUCCESS;
}
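// Editor's note: unlike the ordinary case above, a fused op's workspace_info may hold several sub-nodes, so
// the offsets are not written back into this op's own workspace vector. Instead a per-sub-node map such as
// the hypothetical
//   { "sub_node_a": { 0: off_a }, "sub_node_b": { 0: off_b } }
// is attached via the EXT_ATTR_ATOMIC_WORKSPACE_OFFSET ext-attr for later consumers of the fused op.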
Status GraphMemoryAssigner::CheckOffset() {
  for (const ge::NodePtr &node : compute_graph_->GetAllNodes()) {
    GE_CHECK_NOTNULL(node->GetOpDesc());
    vector<int64_t> input_list = node->GetOpDesc()->GetInputOffset();
    for (auto input : input_list) {
      if (input == ge::kInvalidOffset) {
        GELOGE(FAILED, "Invalid offset in node: %s input: %ld.", node->GetName().c_str(), ge::kInvalidOffset);
        return FAILED;
      }
    }
    vector<int64_t> output_list = node->GetOpDesc()->GetOutputOffset();
    for (auto output : output_list) {
      if (output == ge::kInvalidOffset) {
        GELOGE(FAILED, "Invalid offset in node: %s output: %ld.", node->GetName().c_str(), ge::kInvalidOffset);
        return FAILED;
      }
    }
    vector<int64_t> workspace_list = node->GetOpDesc()->GetWorkspace();
    for (auto workspace : workspace_list) {
      if (workspace == ge::kInvalidOffset) {
        GELOGE(FAILED, "Invalid offset in node: %s workspace: %ld.", node->GetName().c_str(), ge::kInvalidOffset);
        return FAILED;
      }
    }
  }
  return SUCCESS;
}
ge::Status GraphMemoryAssigner::SetInputOffset() {
  if (memory_offset_.empty()) {
    GELOGE(FAILED, "memory_offset_ is empty.");
    return FAILED;
  }
  GEEVENT("[IMAS]AfterAssignMemory : %s memoffset[%zu]", compute_graph_->GetName().c_str(),
          memory_offset_[0].mem_offset_);
  for (const ge::NodePtr &node : compute_graph_->GetAllNodes()) {
    if (UpdateOpInputOffset(node) != ge::SUCCESS) {
      GELOGE(ge::FAILED, "Update op input offset failed");
      return ge::FAILED;
    }
  }
  return ge::SUCCESS;
}
NodePtr GraphMemoryAssigner::GetKnownInputNode(const NodePtr &node) const {
  if (!node->GetOpDesc()->HasAttr(ATTR_NAME_PARENT_NODE_INDEX)) {
    return node;
  }

  if (NodeUtils::IsDynamicShape(node)) {
    return node;
  }

  return NodeUtils::GetParentInput(node);
}
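// Editor's note: for a known-shaped subgraph Data node carrying ATTR_NAME_PARENT_NODE_INDEX, the effective
// producer is the matching input of the parent node, which lets the CONSTANT check in UpdateOpInputOffset
// below look through the subgraph boundary; dynamic-shape nodes keep their own identity because their
// memory is allocated inside the subgraph.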
ge::Status GraphMemoryAssigner::UpdateConstArgsOffset(const NodePtr &node, vector<int64_t> &input_list) const {
  uint32_t parent_index = 0;
  if (!AttrUtils::GetInt(node->GetOpDesc(), ATTR_NAME_PARENT_NODE_INDEX, parent_index)) {
    return SUCCESS;
  }

  // Subgraph Data Node, check for constant input.
  std::string op_type;
  const auto &in_node = NodeUtils::GetParentInput(node);
  if (NodeUtils::GetConstOpType(in_node, op_type)) {
    input_list = in_node->GetOpDesc()->GetOutputOffset();
    node->GetOpDesc()->SetOutputOffset(input_list);  // Set Data output same as const output.
    return SUCCESS;                                  // Constant input.
  }

  // Memory allocated for dynamic shape subgraph Data.
  if (NodeUtils::IsDynamicShape(node)) {
    return SUCCESS;
  }

  const auto &owner = node->GetOwnerComputeGraph();
  const auto &parent_desc = owner->GetParentNode()->GetOpDesc();
  const auto parent_inputs = parent_desc->GetInputOffset();
  if (parent_inputs.size() <= parent_index) {
    GELOGE(FAILED, "Get Parent input offset failed, node: %s, input size: %zu, parent index: %u",
           node->GetName().c_str(), parent_inputs.size(), parent_index);
    return FAILED;
  }

  input_list = {parent_inputs[parent_index]};
  node->GetOpDesc()->SetOutputOffset(input_list);  // Set Data output same as parent input.
  return SUCCESS;
}
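// Worked example (editor's sketch; the offsets are hypothetical): for a static subgraph whose Data node has
// ATTR_NAME_PARENT_NODE_INDEX = 1 and whose parent op has input offsets {0, 4096, 8192}, the function yields
// input_list = {4096} and mirrors it into the Data node's output offset, so reads inside the subgraph
// resolve to the same address as the parent's second input.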
ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node, vector<int64_t> &input_list) const {
  vector<int64_t> origin_input_list;
  vector<int64_t> memory_type;
  auto tmp_op_desc = node->GetOpDesc();
  origin_input_list = tmp_op_desc->GetInputOffset();
  bool has_mem_type_attr = ge::AttrUtils::GetListInt(tmp_op_desc, ATTR_NAME_INPUT_MEM_TYPE_LIST, memory_type);
  for (const auto &anchor : node->GetAllInDataAnchors()) {
    vector<int64_t> output_list;
    auto peer_out_anchor = anchor->GetPeerOutAnchor();
    if (peer_out_anchor == nullptr) {
      continue;
    }

    // If the current node is not a broadcast node, the OutputOffset of the previous node is used to update input_list.
    auto last_peer_out_node = peer_out_anchor->GetOwnerNode();
    auto last_peer_out_op_desc = last_peer_out_node->GetOpDesc();
    GE_CHECK_NOTNULL(last_peer_out_op_desc);
    output_list = last_peer_out_op_desc->GetOutputOffset();
    if (output_list.size() > static_cast<size_t>(peer_out_anchor->GetIdx())) {
      auto input_index = anchor->GetIdx();
      if (has_mem_type_attr) {
        auto input_size = tmp_op_desc->GetInputsSize();
        auto ori_input_offset_list_size = origin_input_list.size();
        auto mem_type_size = memory_type.size();
        if ((input_size != mem_type_size) || (input_size != ori_input_offset_list_size)) {
          GELOGE(ge::FAILED,
                 "fusion: node[%s] input_size[%zu] diff from memory_type_size[%zu]"
                 " from ori_input_offset_list_size[%zu]",
                 tmp_op_desc->GetName().c_str(), input_size, mem_type_size, ori_input_offset_list_size);
          return ge::FAILED;
        }
        // Non-HBM (L1) inputs keep their original input offset;
        // HBM inputs use: input offset = original input offset + peer output offset.
        input_list.emplace_back(memory_type[input_index] == RT_MEMORY_L1
                                    ? origin_input_list[input_index]
                                    : origin_input_list[input_index] + output_list.at(peer_out_anchor->GetIdx()));
        GELOGI("fusion: node[%s] input[%d] is set from node[%s] out index[%d] offset[%ld]",
               tmp_op_desc->GetName().c_str(), input_index,
               peer_out_anchor->GetOwnerNode()->GetOpDesc()->GetName().c_str(), peer_out_anchor->GetIdx(),
               input_list.back());
      } else {
        int64_t output_offset = output_list.at(peer_out_anchor->GetIdx());
        const auto &in_node = GetKnownInputNode(peer_out_anchor->GetOwnerNode());
        if (in_node->GetType() == CONSTANT) {
          GeTensorDesc tensor_desc = tmp_op_desc->GetInputDesc(input_index);
          GE_CHK_STATUS(TensorUtils::GetDataOffset(tensor_desc, output_offset));
        }

        GELOGI("node[%s] input[%d] is set from node[%s] out index[%d] offset[%ld]", tmp_op_desc->GetName().c_str(),
               input_index, peer_out_anchor->GetOwnerNode()->GetOpDesc()->GetName().c_str(), peer_out_anchor->GetIdx(),
               output_offset);
        input_list.emplace_back(output_offset);
      }
    }
  }
  return ge::SUCCESS;
}
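// Worked example (editor's sketch; values hypothetical): with ATTR_NAME_INPUT_MEM_TYPE_LIST = {RT_MEMORY_L1,
// RT_MEMORY_HBM}, origin_input_list = {64, 128}, and a peer output offset of 4096 feeding input 1:
//   input 0 (L1)  -> keeps its original offset 64;
//   input 1 (HBM) -> becomes 128 + 4096 = 4224.
// Without the mem-type attr, an input simply inherits the peer's output offset (re-read from the tensor
// desc when the effective producer is a CONSTANT).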
ge::Status GraphMemoryAssigner::UpdateOpInputOffset(const NodePtr &node) const {
  GE_CHECK_NOTNULL(node->GetOpDesc());
  vector<int64_t> input_list;
  if (node->GetType() == HCOMBROADCAST || node->GetType() == HVDCALLBACKBROADCAST) {
    for (const auto &anchor : node->GetAllInDataAnchors()) {
      vector<int64_t> output_list;
      auto peer_out_anchor = anchor->GetPeerOutAnchor();
      if (peer_out_anchor == nullptr) {
        continue;
      }

      auto last_peer_out_node = peer_out_anchor->GetOwnerNode();
      // If the current node is a broadcast node and the preceding node is a VARIABLE, the InputOffset has
      // already been set in AssignVarAttr2Nodes, so the broadcast node's own InputOffset is used to update
      // input_list. Otherwise, the OutputOffset of the previous node is used to update input_list.
      if (last_peer_out_node->GetType() != VARIABLE) {
        auto last_peer_out_op_desc = last_peer_out_node->GetOpDesc();
        GE_CHECK_NOTNULL(last_peer_out_op_desc);
        output_list = last_peer_out_op_desc->GetOutputOffset();
        if (output_list.size() > static_cast<size_t>(peer_out_anchor->GetIdx())) {
          input_list.emplace_back(output_list.at(peer_out_anchor->GetIdx()));
        }
      } else {
        vector<int64_t> cur_node_input_list;
        auto cur_node_op_desc = node->GetOpDesc();
        GE_CHECK_NOTNULL(cur_node_op_desc);
        cur_node_input_list = cur_node_op_desc->GetInputOffset();
        if (cur_node_input_list.size() > static_cast<size_t>(anchor->GetIdx())) {
          input_list.emplace_back(cur_node_input_list.at(anchor->GetIdx()));
        }
      }
    }
  } else if (node->GetType() == DATA_TYPE) {
    if (UpdateConstArgsOffset(node, input_list) != SUCCESS) {
      GELOGE(FAILED, "Update data: %s args offset failed.", node->GetName().c_str());
      return FAILED;
    }
  } else {
    if (UpdateOpInputOffset(node, input_list) != SUCCESS) {
      GELOGE(FAILED, "Update node: %s input offset failed.", node->GetName().c_str());
      return FAILED;
    }
  }
  node->GetOpDesc()->SetInputOffset(input_list);
  return SUCCESS;
}
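// Editor's summary of the dispatch above: broadcast ops (HCOMBROADCAST / HVDCALLBACKBROADCAST) reuse their
// own pre-assigned InputOffset when fed by a VARIABLE, Data ops go through UpdateConstArgsOffset to inherit
// parent/const offsets, and every other op derives its inputs from the peers' OutputOffset via the overload
// above.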
Status GraphMemoryAssigner::SetIndependentAtomicAttr(const ge::NodePtr &node, int64_t atomic_mem_start,
                                                     const vector<int64_t> &mem_offset_end) {
  GELOGD("Start to set independent atomic attr, atomic_addr_clean memory offset start is %ld", atomic_mem_start);

  // Parse the offset and size vectors: each entry of mem_offset_end closes one section, so the size of
  // section i is the gap between consecutive start offsets.
  vector<int64_t> memory_offset_start;
  vector<int64_t> memory_offset_size;
  memory_offset_start.emplace_back(atomic_mem_start);
  for (size_t i = 0; i < mem_offset_end.size(); ++i) {
    memory_offset_start.emplace_back(mem_offset_end[i]);
    auto size = memory_offset_start[i + 1] - memory_offset_start[i];
    memory_offset_size.emplace_back(size);
  }
  memory_offset_start.pop_back();

  const auto &in_control_anchor = node->GetInControlAnchor();
  if (!memory_offset_size.empty() && in_control_anchor != nullptr) {
    for (auto &peer_out_control_anchor : in_control_anchor->GetPeerOutControlAnchors()) {
      if (peer_out_control_anchor == nullptr) {
        continue;
      }
      auto peer_out_node = peer_out_control_anchor->GetOwnerNode();
      auto peer_out_node_desc = peer_out_node->GetOpDesc();
      if (peer_out_node_desc == nullptr) {
        continue;
      }

      GELOGD("Current node memory_offset vector size is %zu, node name %s, node type is %s.",
             memory_offset_size.size(), peer_out_node_desc->GetName().c_str(), peer_out_node_desc->GetType().c_str());
      if (peer_out_node_desc->GetType() == ATOMICADDRCLEAN) {
        if (SetAtomicCleanAttr(peer_out_node, memory_offset_start, memory_offset_size) != SUCCESS) {
          GELOGE(FAILED, "Set atomic clean attr failed.");
          return FAILED;
        }
      }
    }
  }
  return SUCCESS;
}
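// Worked example (editor's sketch): with atomic_mem_start = 100 and mem_offset_end = {164, 228}, the loop
// above produces memory_offset_start = {100, 164} (after pop_back) and memory_offset_size = {64, 64}; each
// (start, size) pair is then handed to the ATOMICADDRCLEAN producer so it can zero exactly those sections.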
Status GraphMemoryAssigner::SetLoopGraphAtomicAttr(const ge::NodePtr &node, int64_t atomic_mem_start) {
  // Set the address attr of the atomic clean operator for the loop graph.
  int64_t atomic_mem_size = memory_offset_[0].mem_offset_ - atomic_mem_start;
  GELOGI("SetLoopGraphAtomicAttr begin, atomic_addr_clean start is %ld, mem_size is %ld, mem_offset is %zu.",
         atomic_mem_start, atomic_mem_size, memory_offset_[0].mem_offset_);
  const auto &in_control_anchor = node->GetInControlAnchor();
  if (atomic_mem_size != 0 && in_control_anchor != nullptr) {
    for (auto &peer_out_control_anchor : in_control_anchor->GetPeerOutControlAnchors()) {
      if (peer_out_control_anchor == nullptr) {
        continue;
      }
      auto peer_out_node = peer_out_control_anchor->GetOwnerNode();
      auto peer_out_node_desc = peer_out_node->GetOpDesc();
      if (peer_out_node_desc == nullptr) {
        continue;
      }

      GELOGD("SetLoopGraphAtomicAttr, node is %s, op type is %s.", peer_out_node_desc->GetName().c_str(),
             peer_out_node_desc->GetType().c_str());

      if (peer_out_node_desc->GetType() == ATOMICADDRCLEAN) {
        GE_CHK_STATUS_EXEC(SetAtomicCleanAttr(peer_out_node, {atomic_mem_start}, {atomic_mem_size}),
                           GELOGE(FAILED, "SetAtomicCleanAttr failed.");
                           return FAILED);
      }
    }
  }
  return SUCCESS;
}
ge::Status GraphMemoryAssigner::SetAtomicCleanAttr(const NodePtr &n, const vector<int64_t> &atomic_mem_start,
                                                   const vector<int64_t> &atomic_mem_size) {
  for (ge::NodePtr &node : compute_graph_->GetAllNodes()) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, continue);

    if (((n != nullptr) && (node->GetName() == n->GetName())) ||
        ((n == nullptr) && (node_op_desc->GetType() == ATOMICADDRCLEAN))) {
      vector<int64_t> workspace_vector = node_op_desc->GetWorkspace();
      vector<int64_t> workspace_byte_vector = node_op_desc->GetWorkspaceBytes();
      workspace_vector.insert(workspace_vector.end(), atomic_mem_start.begin(), atomic_mem_start.end());
      workspace_byte_vector.insert(workspace_byte_vector.end(), atomic_mem_size.begin(), atomic_mem_size.end());
      node_op_desc->SetWorkspace(workspace_vector);
      node_op_desc->SetWorkspaceBytes(workspace_byte_vector);

      std::vector<int64_t> mem_start_vector;
      // If GetListInt fails, mem_start_vector is empty.
      (void)ge::AttrUtils::GetListInt(node_op_desc, ATTR_NAME_AUTOMIC_ADD_START, mem_start_vector);
      mem_start_vector.insert(mem_start_vector.end(), atomic_mem_start.begin(), atomic_mem_start.end());
      GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(node_op_desc, ATTR_NAME_AUTOMIC_ADD_START, mem_start_vector),
                       GELOGE(FAILED, "SetListInt failed.");
                       return FAILED);

      std::vector<int64_t> mem_size_vector;
      // If GetListInt fails, mem_size_vector is empty.
      (void)ge::AttrUtils::GetListInt(node_op_desc, ATTR_NAME_AUTOMIC_ADD_MEM_SIZE, mem_size_vector);
      mem_size_vector.insert(mem_size_vector.end(), atomic_mem_size.begin(), atomic_mem_size.end());
      GE_CHK_BOOL_EXEC(ge::AttrUtils::SetListInt(node_op_desc, ATTR_NAME_AUTOMIC_ADD_MEM_SIZE, mem_size_vector),
                       GELOGE(FAILED, "SetListInt failed.");
                       return FAILED);

      std::stringstream ss;
      for (auto iter : atomic_mem_start) {
        ss << iter << " ";
      }
      string atomic_mem_start_str = ss.str();
      ss.clear();
      ss.str("");
      for (auto iter : atomic_mem_size) {
        ss << iter << " ";
      }
      string atomic_mem_size_str = ss.str();

      GELOGI("[IMAS]SetAtomicCleanAttr : Set graph[%s] atomic_node[%s] output offset [%s] size[%s] streamid[%ld]",
             node->GetOwnerComputeGraph()->GetName().c_str(), node_op_desc->GetName().c_str(),
             atomic_mem_start_str.c_str(), atomic_mem_size_str.c_str(), node->GetOpDesc()->GetStreamId());
    }
  }
  return SUCCESS;
}
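// Editor's note: the matched clean node receives the sections twice — appended to its workspace /
// workspace-bytes vectors and duplicated into the ATTR_NAME_AUTOMIC_ADD_START /
// ATTR_NAME_AUTOMIC_ADD_MEM_SIZE list attrs. Passing n == nullptr applies the ranges to every
// ATOMICADDRCLEAN node in the graph (the second branch of the condition above).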
void GraphMemoryAssigner::AlignMemOffset(const int64_t &mem_align_size) {
  if (mem_align_size <= 0) {
    return;
  }
  memory_offset_[0].mem_offset_ =
      (memory_offset_[0].mem_offset_ + mem_align_size - 1) / mem_align_size * mem_align_size;
}
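// Worked example (editor's sketch): the expression rounds mem_offset_ up to the next multiple of
// mem_align_size. E.g. offset 1624 with alignment 512: (1624 + 511) / 512 * 512 = 2048; an already aligned
// offset such as 2048 maps to itself.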
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.