var_mem_assign_util.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "graph/build/memory/var_mem_assign_util.h"

#include <vector>

#include "common/types.h"
#include "framework/common/debug/ge_log.h"
#include "graph/common/transop_util.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/tensor.h"
#include "graph/types.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/tensor_utils.h"

using std::string;
using std::vector;
namespace ge {
// Assigns memory for variable nodes and for outputs that reference a variable.
Status VarMemAssignUtil::AssignVarMemory(ge::ComputeGraphPtr &compute_graph) {
  GE_CHK_STATUS_RET(AssignMemory2VariableNode(compute_graph));
  GE_CHK_STATUS_RET(AssignMemory2HasRefAttrNode(compute_graph));
  return SUCCESS;
}

Status VarMemAssignUtil::AssignConstantOpMemory(ge::ComputeGraphPtr &compute_graph) {
  return AssignStaticMemory2Node(compute_graph);
}

Status VarMemAssignUtil::AssignMemory2VariableNode(ge::ComputeGraphPtr &compute_graph) {
  return AssignStaticMemory2Node(compute_graph);
}

// Allocates static memory for VARIABLE/CONSTANTOP nodes through VarManager and writes
// the resulting device address into each node's output offset list.
Status VarMemAssignUtil::AssignStaticMemory2Node(ge::ComputeGraphPtr &compute_graph) {
  GE_IF_BOOL_EXEC(compute_graph == nullptr, return FAILED);
  for (const ge::NodePtr &n : compute_graph->GetAllNodes()) {
    GE_IF_BOOL_EXEC((n->GetType() != VARIABLE) && (n->GetType() != CONSTANTOP), continue);
    string ref_var_src_var_name;
    GE_CHECK_NOTNULL(n->GetOpDesc());
    // Skip nodes that reference another variable's memory.
    GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(n->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
    string node_name = n->GetName();
    GE_IF_BOOL_EXEC(n->GetOpDesc()->GetAllOutputsDesc().empty(),
                    GELOGE(FAILED, "node:%s has no OutputDesc.", n->GetName().c_str());
                    return FAILED);
    ge::ConstGeTensorDescPtr tensor_desc = n->GetOpDesc()->GetOutputDescPtr(0);
    GE_CHECK_NOTNULL(tensor_desc);
    rtMemType_t memory_type = RT_MEMORY_HBM;
    uint32_t mem_type = 0;
    if (AttrUtils::GetInt(n->GetOpDesc(), ATTR_OUTPUT_MEMORY_TYPE, mem_type) && (mem_type == 1)) {
      memory_type = RT_MEMORY_RDMA_HBM;
    }
    if (!VarManager::Instance(compute_graph->GetSessionID())->IsVarExist(node_name, *tensor_desc)) {
      GE_CHK_STATUS_RET(
          VarManager::Instance(compute_graph->GetSessionID())->AssignVarMem(node_name, *tensor_desc, memory_type));
      GE_IF_BOOL_EXEC(n->GetType() == VARIABLE,
                      GE_CHK_STATUS_RET(AssignData2Fp32Var(n, compute_graph->GetSessionID())));
      GE_CHK_STATUS_RET(VarManager::Instance(compute_graph->GetSessionID())
                            ->SetAllocatedGraphId(node_name, compute_graph->GetGraphID()));
    }
    uint8_t *dev_ptr = nullptr;
    GE_CHK_STATUS_RET(VarManager::Instance(compute_graph->GetSessionID())
                          ->GetVarAddr(node_name, *tensor_desc, &dev_ptr, memory_type));
    vector<int64_t> output_list = n->GetOpDesc()->GetOutputOffset();
    GE_IF_BOOL_EXEC(output_list.empty(), return FAILED);
    // The device address is recorded as an int64_t offset on the op descriptor.
    output_list[0] = static_cast<int64_t>(reinterpret_cast<intptr_t>(dev_ptr));
    n->GetOpDesc()->SetOutputOffset(output_list);
  }
  return SUCCESS;
}
// If the variable carries VAR_ATTR_SRC_VAR_NAME, bind it to the source variable's
// device address instead of leaving it on its own allocation.
Status VarMemAssignUtil::AssignData2Fp32Var(const ge::NodePtr &node, uint64_t session_id) {
  string src_var_name;
  GE_CHECK_NOTNULL(node->GetOpDesc());
  if (ge::AttrUtils::GetStr(node->GetOpDesc(), VAR_ATTR_SRC_VAR_NAME, src_var_name)) {
    ge::GeTensorDesc cur_tensor_desc;
    uint8_t *dev_ptr = nullptr;
    rtMemType_t memory_type = RT_MEMORY_HBM;
    GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetCurVarDesc(src_var_name, cur_tensor_desc));
    GE_CHK_STATUS_RET(
        VarManager::Instance(session_id)->GetVarAddr(src_var_name, cur_tensor_desc, &dev_ptr, memory_type));
    GE_CHK_STATUS_RET(
        VarManager::Instance(session_id)->SetVarAddr(node->GetName(), cur_tensor_desc, dev_ptr, memory_type));
  }
  return SUCCESS;
}

Status VarMemAssignUtil::AssignVarAttr2Nodes(ge::ComputeGraphPtr &compute_graph) {
  for (const ge::NodePtr &node : compute_graph->GetAllNodes()) {
    GE_IF_BOOL_EXEC(node->GetType() != VARIABLE, continue);
    string ref_var_src_var_name;
    GE_CHECK_NOTNULL(node->GetOpDesc());
    GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(node->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
    GE_CHK_STATUS_RET(DealVariableNode(compute_graph->GetGraphID(), node, compute_graph->GetSessionID()));
  }
  return SUCCESS;
}
// Writes the variable's device address into the given output slot of `node`.
Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::NodePtr &var_node, int index,
                                            uint64_t session_id) {
  vector<int64_t> output_list;
  uint8_t *dev_ptr = nullptr;
  GE_CHECK_NOTNULL(node->GetOpDesc());
  output_list = node->GetOpDesc()->GetOutputOffset();
  if (output_list.empty()) {
    GELOGE(PARAM_INVALID, "Output_list is empty");
    return PARAM_INVALID;
  }
  GE_CHECK_NOTNULL(var_node->GetOpDesc());
  GeTensorDesc var_tensor_desc = var_node->GetOpDesc()->GetOutputDesc(0);
  rtMemType_t memory_type = RT_MEMORY_HBM;
  GE_CHK_STATUS_RET(
      VarManager::Instance(session_id)->GetVarAddr(var_node->GetName(), var_tensor_desc, &dev_ptr, memory_type));
  int out_list_size = static_cast<int>(output_list.size());
  GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %d >= output_list.size() %d", index, out_list_size);
  output_list[index] = static_cast<int64_t>(reinterpret_cast<intptr_t>(dev_ptr));
  GELOGI("Assign node outputOffset[index] is: %ld", output_list[index]);
  node->GetOpDesc()->SetOutputOffset(output_list);
  return SUCCESS;
}

// Recursively walks Assign/AssignAdd/AssignSub consumers and points each node's
// output offset at the variable's device address.
Status VarMemAssignUtil::DealExportVariableNode(const ge::NodePtr &node, const ge::NodePtr &var_node,
                                                uint64_t session_id) {
  ge::OutDataAnchorPtr var_out_anchor = node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(var_out_anchor == nullptr, return FAILED);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : var_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      if (dst_in_var_anchor == dst_node->GetInDataAnchor(0)) {
        GE_CHK_STATUS_RET(DealExportVariableNode(dst_node, var_node, session_id));
      }
    }
  }
  GE_CHK_STATUS_RET(SetOutVariableAttr(node, var_node, 0, session_id));
  return SUCCESS;
}
// Records broadcast information (variable name, input index, size, offsets) for a
// broadcast consumer of the variable and saves it through VarManager.
Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr &node,
                                           const ge::InDataAnchorPtr &in_data_anchor, const ge::NodePtr &var_node,
                                           uint64_t session_id) {
  VarBroadCastInfo broad_cast_info;
  broad_cast_info.idx = in_data_anchor->GetIdx();
  broad_cast_info.var_name = var_node->GetName();
  broad_cast_info.broadcast_name = node->GetName();

  auto op_desc = node->GetOpDesc();
  GE_CHK_BOOL_RET_STATUS(op_desc != nullptr, FAILED, "Get broadcast op %s desc is nullptr", node->GetName().c_str());
  GE_IF_BOOL_EXEC(broad_cast_info.idx < 0,
                  GELOGI("Broadcast input index must be non-negative, actual %d", broad_cast_info.idx);
                  return INTERNAL_ERROR);

  auto broad_cast_index = static_cast<size_t>(broad_cast_info.idx);
  auto input_tensor_desc_ptr_vistor = op_desc->GetAllInputsDescPtr();
  GE_CHK_BOOL_RET_STATUS(input_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s input tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
                         input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
  const ge::GeTensorDescPtr input_tensor_desc =
      input_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
  int64_t input_size = 0;
  GE_CHK_STATUS(TensorUtils::GetSize(*input_tensor_desc, input_size), "get input size failed.");
  broad_cast_info.input_size = input_size;

  vector<int64_t> output_list = op_desc->GetOutputOffset();
  GE_CHK_BOOL_RET_STATUS(output_list.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s output_list size [%zu] < idx [%d]", node->GetName().c_str(),
                         output_list.size(), broad_cast_info.idx);
  broad_cast_info.input_offset = output_list[broad_cast_info.idx];
  broad_cast_info.output_offset = output_list[broad_cast_info.idx];
  // The broadcast node's input offsets are set to its output offsets (shared memory).
  op_desc->SetInputOffset(output_list);

  auto output_tensor_desc_ptr_vistor = op_desc->GetAllOutputsDescPtr();
  GE_CHK_BOOL_RET_STATUS(output_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s output tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
                         output_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
  const ge::GeTensorDescPtr output_tensor_desc =
      output_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
  int64_t output_size = 0;
  GE_CHK_STATUS(TensorUtils::GetSize(*output_tensor_desc, output_size), "get output size failed.");
  broad_cast_info.output_size = output_size;
  GE_CHK_BOOL_RET_STATUS(broad_cast_info.output_size == broad_cast_info.input_size, FAILED,
                         "Broadcast op input size[%lu] is not equal output size[%lu]", broad_cast_info.input_size,
                         broad_cast_info.output_size);

  GE_CHK_STATUS_RET(VarManager::Instance(session_id)->SaveBroadCastInfo(graph_id, broad_cast_info));
  return SUCCESS;
}
// For one variable node: share its address with assign-like consumers, record
// broadcast consumers, and follow transformation chains (TransData/Cast/Transpose/Permute).
Status VarMemAssignUtil::DealVariableNode(uint32_t graph_id, const ge::NodePtr &node, uint64_t session_id) {
  GE_CHK_STATUS_RET(SetOutVariableAttr(node, node, 0, session_id));
  for (const ge::OutDataAnchorPtr &var_out_data_anchor : node->GetAllOutDataAnchors()) {
    for (const ge::InDataAnchorPtr &dst_in_data_anchor : var_out_data_anchor->GetPeerInDataAnchors()) {
      ge::NodePtr dst_node = dst_in_data_anchor->GetOwnerNode();
      if (dst_node->GetType() == HCOMBROADCAST || dst_node->GetType() == HVDCALLBACKBROADCAST) {
        GE_CHK_STATUS_RET(DealBroadCastNode(graph_id, dst_node, dst_in_data_anchor, node, session_id));
        continue;
      }
      if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) ||
          (dst_node->GetType() == ASSIGNSUB)) {
        if (dst_in_data_anchor == dst_node->GetInDataAnchor(0)) {
          GE_CHK_STATUS_RET(DealExportVariableNode(dst_node, node, session_id));
        }
      }
      auto dst_type = dst_node->GetType();
      bool is_trans_node =
          (dst_type == TRANSDATA) || (dst_type == CAST) || (dst_type == TRANSPOSE) || (dst_type == PERMUTE);
      if (is_trans_node) {
        NodePtr final_trans_node = GetFinalTransNode(dst_node);
        GE_CHK_STATUS_RET(DealTransNode(final_trans_node));
      }
    }
  }
  return SUCCESS;
}
// Follows the chain of trans ops (TransData/Cast/Transpose/Permute) downstream from
// trans_node and returns the last one in the chain.
ge::NodePtr VarMemAssignUtil::GetFinalTransNode(const ge::NodePtr &trans_node) {
  NodePtr final_ref_node = trans_node;
  OutDataAnchorPtr trans_out_data_anchor = trans_node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(trans_out_data_anchor == nullptr, return final_ref_node);
  for (const auto &dst_in_anchor : trans_out_data_anchor->GetPeerInDataAnchors()) {
    NodePtr dst_node = dst_in_anchor->GetOwnerNode();
    auto dst_type = dst_node->GetType();
    bool is_trans_node =
        (dst_type == TRANSDATA) || (dst_type == CAST) || (dst_type == TRANSPOSE) || (dst_type == PERMUTE);
    if (is_trans_node && (dst_in_anchor->GetIdx() == 0)) {
      final_ref_node = GetFinalTransNode(dst_node);
    }
  }
  GELOGI("Final writable node is %s", final_ref_node->GetName().c_str());
  return final_ref_node;
}

Status VarMemAssignUtil::DealTransNode(const ge::NodePtr &final_trans_node) {
  ge::OutDataAnchorPtr final_trans_out_anchor = final_trans_node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(final_trans_out_anchor == nullptr, return SUCCESS);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : final_trans_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      GE_CHK_STATUS_RET(DealExportTransNode(dst_node, final_trans_node));
    }
  }
  return SUCCESS;
}

Status VarMemAssignUtil::DealExportTransNode(const ge::NodePtr &node, const ge::NodePtr &final_trans_node) {
  ge::OutDataAnchorPtr node_out_anchor = node->GetOutDataAnchor(0);
  GE_CHECK_NOTNULL(node_out_anchor);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : node_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      GE_CHK_STATUS_RET(DealExportTransNode(dst_node, final_trans_node));
    }
  }
  GE_CHK_STATUS_RET(SetOutTransNodeToAssign(node, final_trans_node, 0));
  return SUCCESS;
}
// Copies the final trans node's output offset into the given output slot of the
// assign node so the two share memory.
Status VarMemAssignUtil::SetOutTransNodeToAssign(const ge::NodePtr &node, const ge::NodePtr &final_trans_node,
                                                 size_t index) {
  GE_CHECK_NOTNULL(node->GetOpDesc());
  GE_CHECK_NOTNULL(final_trans_node->GetOpDesc());
  // get final_trans_node outputOffset
  vector<int64_t> final_trans_output_list = final_trans_node->GetOpDesc()->GetOutputOffset();
  GE_CHECK_SIZE(final_trans_output_list.size());
  // get assign_node outputOffset
  vector<int64_t> output_list = node->GetOpDesc()->GetOutputOffset();
  auto out_list_size = output_list.size();
  GE_CHECK_SIZE(out_list_size);
  GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %zu >= output_list.size() %zu", index, out_list_size);
  // copy final_trans_node outputOffset[0] to assign_node outputOffset[index]
  GELOGI("final_trans_node outputOffset[0] is: %ld", final_trans_output_list[0]);
  output_list[index] = final_trans_output_list[0];
  GELOGI("Assign node outputOffset[index] is: %ld", output_list[index]);
  node->GetOpDesc()->SetOutputOffset(output_list);
  return SUCCESS;
}

// For every node output that carries REF_VAR_SRC_VAR_NAME, bind that output to the
// referenced variable's address.
Status VarMemAssignUtil::AssignMemory2HasRefAttrNode(ge::ComputeGraphPtr &compute_graph) {
  for (const ge::NodePtr &n : compute_graph->GetAllNodes()) {
    string ref_var_src_var_name;
    auto op_desc = n->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx += 1) {
      const auto out_desc = op_desc->MutableOutputDesc(idx);
      if (ge::AttrUtils::GetStr(out_desc, REF_VAR_SRC_VAR_NAME, ref_var_src_var_name)) {
        GE_CHK_STATUS_RET(AssignData2VarRef(n, ref_var_src_var_name, compute_graph->GetSessionID(), idx));
      }
    }
  }
  return SUCCESS;
}
Status VarMemAssignUtil::AssignData2VarRef(const ge::NodePtr &has_ref_attr_node, const string &src_var_name,
                                           uint64_t session_id, uint32_t out_index) {
  // Find the source variable node in the root graph or any of its subgraphs.
  auto root_graph = GraphUtils::FindRootGraph(has_ref_attr_node->GetOwnerComputeGraph());
  GE_CHECK_NOTNULL(root_graph);
  ge::NodePtr var_ref_src_var = root_graph->FindNode(src_var_name);
  if (var_ref_src_var == nullptr) {
    for (auto sub_graph : root_graph->GetAllSubgraphs()) {
      auto node_ptr = sub_graph->FindNode(src_var_name);
      if (node_ptr != nullptr) {
        var_ref_src_var = node_ptr;
        break;
      }
    }
  }
  GE_IF_BOOL_EXEC(var_ref_src_var == nullptr || var_ref_src_var->GetOpDesc() == nullptr, return FAILED);
  GeTensorDesc src_tensor_desc = var_ref_src_var->GetOpDesc()->GetOutputDesc(0);
  uint8_t *dev_ptr = nullptr;
  GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetVarAddr(src_var_name, src_tensor_desc, &dev_ptr));
  GE_CHECK_NOTNULL(has_ref_attr_node->GetOpDesc());
  vector<int64_t> ref_attr_node_output_list = has_ref_attr_node->GetOpDesc()->GetOutputOffset();
  GE_CHECK_SIZE(ref_attr_node_output_list.size());
  GE_CHK_BOOL_RET_STATUS(out_index < ref_attr_node_output_list.size(), FAILED,
                         "out_index %u >= ref_attr_node_output_list.size() %zu", out_index,
                         ref_attr_node_output_list.size());
  // Write the source variable's device address into the ref node's output offset.
  ref_attr_node_output_list[out_index] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(dev_ptr));
  has_ref_attr_node->GetOpDesc()->SetOutputOffset(ref_attr_node_output_list);
  GELOGI("Refresh address successfully, ref node: [%s], addr: [%ld]", has_ref_attr_node->GetName().c_str(),
         ref_attr_node_output_list[out_index]);
  return SUCCESS;
}
}  // namespace ge
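
For orientation, here is a minimal sketch of how the entry points above might be chained by a memory-assignment pass during graph build. Only the VarMemAssignUtil calls, the GE_CHK_STATUS_RET macro, and the included headers come from the file itself; the helper name AssignVariableMemoryForBuild and the surrounding structure are assumptions for illustration, not GE's actual build pipeline.

// Illustrative only: a hypothetical caller wiring up the utilities above.
#include "framework/common/debug/ge_log.h"
#include "graph/build/memory/var_mem_assign_util.h"

namespace ge {
Status AssignVariableMemoryForBuild(ComputeGraphPtr &compute_graph) {  // hypothetical helper
  // 1. Allocate (or look up) VarManager memory for VARIABLE/CONSTANTOP nodes and for
  //    outputs carrying REF_VAR_SRC_VAR_NAME, writing device addresses into output offsets.
  GE_CHK_STATUS_RET(VarMemAssignUtil::AssignVarMemory(compute_graph));
  // 2. Propagate variable addresses to Assign/Broadcast/trans-op consumers.
  GE_CHK_STATUS_RET(VarMemAssignUtil::AssignVarAttr2Nodes(compute_graph));
  return SUCCESS;
}
}  // namespace ge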

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the frontend module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.
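
To show where GE API sits, below is a minimal sketch of driving GE directly through the client interface declared in ge_api.h (GEInitialize, Session::AddGraph, Session::RunGraph, GEFinalize). In normal MindSpore training/inference this is done internally; the option keys, graph construction, and exact overloads here are assumptions and may differ between GraphEngine versions.

// Minimal sketch of calling GE through its client API; normally MindSpore does this
// internally. Option contents and graph construction are placeholders.
#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

int main() {
  std::map<std::string, std::string> options;  // device/runtime options (assumed empty here)
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    ge::Session session(options);
    ge::Graph graph("demo_graph");  // a graph handed down from the frontend (ME)
    // ... build or deserialize the graph here ...
    (void)session.AddGraph(1, graph);            // register the graph with GE Core
    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    (void)session.RunGraph(1, inputs, outputs);  // optimize, compile and execute
  }
  (void)ge::GEFinalize();
  return 0;
}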