
var_mem_assign_util.cc 17 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/build/memory/var_mem_assign_util.h"
#include <vector>
#include "common/types.h"
#include "framework/common/debug/ge_log.h"
#include "graph/common/transop_util.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/manager/graph_mem_allocator.h"
#include "graph/manager/graph_var_manager.h"
#include "graph/tensor.h"
#include "graph/types.h"
#include "graph/utils/attr_utils.h"
#include "graph/utils/graph_utils.h"
#include "graph/utils/tensor_utils.h"

using std::string;
using std::vector;

namespace ge {
Status VarMemAssignUtil::AssignVarMemory(ge::ComputeGraphPtr &compute_graph) {
  return AssignMemory2VariableNode(compute_graph);
}

Status VarMemAssignUtil::AssignConstantOpMemory(ge::ComputeGraphPtr &compute_graph) {
  return AssignStaticMemory2Node(compute_graph);
}

Status VarMemAssignUtil::AssignMemory2VariableNode(ge::ComputeGraphPtr &compute_graph) {
  return AssignStaticMemory2Node(compute_graph);
}

// Assign static memory to every VARIABLE and CONSTANTOP node and record the assigned
// device address in the node's output-offset list.
Status VarMemAssignUtil::AssignStaticMemory2Node(ge::ComputeGraphPtr &compute_graph) {
  GE_IF_BOOL_EXEC(compute_graph == nullptr, return FAILED);
  for (const ge::NodePtr &n : compute_graph->GetAllNodes()) {
    GE_IF_BOOL_EXEC((n->GetType() != VARIABLE) && (n->GetType() != CONSTANTOP), continue);
    string ref_var_src_var_name;
    GE_CHECK_NOTNULL(n->GetOpDesc());
    GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(n->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
    string node_name = n->GetName();
    GE_IF_BOOL_EXEC(n->GetOpDesc()->GetAllOutputsDesc().empty(),
                    GELOGE(FAILED, "node:%s has no OutputDesc.", n->GetName().c_str());
                    return FAILED);
    ge::ConstGeTensorDescPtr tensor_desc = n->GetOpDesc()->GetOutputDescPtr(0);
    GE_CHECK_NOTNULL(tensor_desc);
    rtMemType_t memory_type = RT_MEMORY_HBM;
    uint32_t mem_type = 0;
    if (AttrUtils::GetInt(n->GetOpDesc(), ATTR_OUTPUT_MEMORY_TYPE, mem_type) && (mem_type == 1)) {
      memory_type = RT_MEMORY_RDMA_HBM;
    }
    if (!VarManager::Instance(compute_graph->GetSessionID())->IsVarExist(node_name, *tensor_desc)) {
      GE_CHK_STATUS_RET(
          VarManager::Instance(compute_graph->GetSessionID())->AssignVarMem(node_name, *tensor_desc, memory_type));
      GE_IF_BOOL_EXEC(n->GetType() == VARIABLE,
                      GE_CHK_STATUS_RET(AssignData2Fp32Var(n, compute_graph->GetSessionID())));
      GE_CHK_STATUS_RET(VarManager::Instance(compute_graph->GetSessionID())
                            ->SetAllocatedGraphId(node_name, compute_graph->GetGraphID()));
    }
    uint8_t *dev_ptr = nullptr;
    GE_CHK_STATUS_RET(VarManager::Instance(compute_graph->GetSessionID())
                          ->GetVarAddr(node_name, *tensor_desc, &dev_ptr, memory_type));
    vector<int64_t> output_list = n->GetOpDesc()->GetOutputOffset();
    GE_IF_BOOL_EXEC(output_list.empty(), return FAILED);
    output_list[0] = static_cast<int64_t>(reinterpret_cast<intptr_t>(dev_ptr));
    n->GetOpDesc()->SetOutputOffset(output_list);
  }
  return SUCCESS;
}

// For a variable carrying VAR_ATTR_SRC_VAR_NAME, reuse the source variable's device address.
Status VarMemAssignUtil::AssignData2Fp32Var(const ge::NodePtr &node, uint64_t session_id) {
  string src_var_name;
  GE_CHECK_NOTNULL(node->GetOpDesc());
  if (ge::AttrUtils::GetStr(node->GetOpDesc(), VAR_ATTR_SRC_VAR_NAME, src_var_name)) {
    ge::GeTensorDesc cur_tensor_desc;
    uint8_t *dev_ptr = nullptr;
    rtMemType_t memory_type = RT_MEMORY_HBM;
    GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetCurVarDesc(src_var_name, cur_tensor_desc));
    GE_CHK_STATUS_RET(
        VarManager::Instance(session_id)->GetVarAddr(src_var_name, cur_tensor_desc, &dev_ptr, memory_type));
    GE_CHK_STATUS_RET(
        VarManager::Instance(session_id)->SetVarAddr(node->GetName(), cur_tensor_desc, dev_ptr, memory_type));
  }
  return SUCCESS;
}

Status VarMemAssignUtil::AssignVarAttr2Nodes(ge::ComputeGraphPtr &compute_graph) {
  for (const ge::NodePtr &node : compute_graph->GetAllNodes()) {
    GE_IF_BOOL_EXEC(node->GetType() != VARIABLE, continue);
    string ref_var_src_var_name;
    GE_CHECK_NOTNULL(node->GetOpDesc());
    GE_IF_BOOL_EXEC(ge::AttrUtils::GetStr(node->GetOpDesc(), REF_VAR_SRC_VAR_NAME, ref_var_src_var_name), continue);
    GE_CHK_STATUS_RET(DealVariableNode(compute_graph->GetGraphID(), node, compute_graph->GetSessionID()));
  }
  return SUCCESS;
}

// Write the variable's device address into the given output index of `node`.
Status VarMemAssignUtil::SetOutVariableAttr(const ge::NodePtr &node, const ge::NodePtr &var_node, int index,
                                            uint64_t session_id) {
  vector<int64_t> output_list;
  uint8_t *dev_ptr = nullptr;
  GE_CHECK_NOTNULL(node->GetOpDesc());
  output_list = node->GetOpDesc()->GetOutputOffset();
  if (output_list.empty()) {
    GELOGE(PARAM_INVALID, "Output_list is empty");
    return PARAM_INVALID;
  }
  GE_CHECK_NOTNULL(var_node->GetOpDesc());
  GeTensorDesc var_tensor_desc = var_node->GetOpDesc()->GetOutputDesc(0);
  rtMemType_t memory_type = RT_MEMORY_HBM;
  GE_CHK_STATUS_RET(
      VarManager::Instance(session_id)->GetVarAddr(var_node->GetName(), var_tensor_desc, &dev_ptr, memory_type));
  int out_list_size = static_cast<int>(output_list.size());
  GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %d >= output_list.size() %d", index, out_list_size);
  output_list[index] = static_cast<int64_t>(reinterpret_cast<intptr_t>(dev_ptr));
  GELOGI("Assign node outputOffset[index] is: %ld", output_list[index]);
  node->GetOpDesc()->SetOutputOffset(output_list);
  return SUCCESS;
}

// Recursively propagate a variable's address through chained Assign/AssignAdd/AssignSub nodes.
Status VarMemAssignUtil::DealExportVariableNode(const ge::NodePtr &node, const ge::NodePtr &var_node,
                                                uint64_t session_id) {
  ge::OutDataAnchorPtr var_out_anchor = node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(var_out_anchor == nullptr, return FAILED);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : var_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      if (dst_in_var_anchor == dst_node->GetInDataAnchor(0)) {
        GE_CHK_STATUS_RET(DealExportVariableNode(dst_node, var_node, session_id));
      }
    }
  }
  GE_CHK_STATUS_RET(SetOutVariableAttr(node, var_node, 0, session_id));
  return SUCCESS;
}

// Record broadcast information (input/output offsets and sizes) for an HCOM/Horovod broadcast
// node that consumes a variable, so the broadcast can later operate on the variable's memory.
Status VarMemAssignUtil::DealBroadCastNode(uint32_t graph_id, const ge::NodePtr &node,
                                           const ge::InDataAnchorPtr &in_data_anchor, const ge::NodePtr &var_node,
                                           uint64_t session_id) {
  VarBroadCastInfo broad_cast_info;
  broad_cast_info.idx = in_data_anchor->GetIdx();
  broad_cast_info.var_name = var_node->GetName();
  broad_cast_info.broadcast_name = node->GetName();

  auto op_desc = node->GetOpDesc();
  GE_CHK_BOOL_RET_STATUS(op_desc != nullptr, FAILED, "Get broadcast op %s desc is nullptr", node->GetName().c_str());
  GE_IF_BOOL_EXEC(broad_cast_info.idx < 0,
                  GELOGI("Broadcast input index must be positive, actual %d", broad_cast_info.idx);
                  return INTERNAL_ERROR);

  auto broad_cast_index = static_cast<size_t>(broad_cast_info.idx);
  auto input_tensor_desc_ptr_vistor = op_desc->GetAllInputsDescPtr();
  GE_CHK_BOOL_RET_STATUS(input_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s input tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
                         input_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
  const ge::GeTensorDescPtr input_tensor_desc =
      input_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
  int64_t input_size = 0;
  GE_CHK_STATUS(TensorUtils::GetSize(*input_tensor_desc, input_size), "get input size failed.");
  broad_cast_info.input_size = input_size;

  vector<int64_t> output_list = op_desc->GetOutputOffset();
  GE_CHK_BOOL_RET_STATUS(output_list.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s output_list size [%zu] < idx [%d]", node->GetName().c_str(),
                         output_list.size(), broad_cast_info.idx);
  broad_cast_info.input_offset = output_list[broad_cast_info.idx];
  broad_cast_info.output_offset = output_list[broad_cast_info.idx];
  op_desc->SetInputOffset(output_list);

  auto output_tensor_desc_ptr_vistor = op_desc->GetAllOutputsDescPtr();
  GE_CHK_BOOL_RET_STATUS(output_tensor_desc_ptr_vistor.size() > broad_cast_index, FAILED,
                         "Get broadcast op %s output tensor desc size [%zu] < idx [%d]", node->GetName().c_str(),
                         output_tensor_desc_ptr_vistor.size(), broad_cast_info.idx);
  const ge::GeTensorDescPtr output_tensor_desc =
      output_tensor_desc_ptr_vistor.at(static_cast<size_t>(broad_cast_info.idx));
  int64_t output_size = 0;
  GE_CHK_STATUS(TensorUtils::GetSize(*output_tensor_desc, output_size), "get output size failed.");
  broad_cast_info.output_size = output_size;
  GE_CHK_BOOL_RET_STATUS(broad_cast_info.output_size == broad_cast_info.input_size, FAILED,
                         "Broadcast op input size[%lu] is not equal output size[%lu]", broad_cast_info.input_size,
                         broad_cast_info.output_size);

  GE_CHK_STATUS_RET(VarManager::Instance(session_id)->SaveBroadCastInfo(graph_id, broad_cast_info));
  return SUCCESS;
}

// Walk a variable's downstream consumers and handle broadcast, assign and trans-op cases.
Status VarMemAssignUtil::DealVariableNode(uint32_t graph_id, const ge::NodePtr &node, uint64_t session_id) {
  GE_CHK_STATUS_RET(SetOutVariableAttr(node, node, 0, session_id));

  for (const ge::OutDataAnchorPtr &var_out_data_anchor : node->GetAllOutDataAnchors()) {
    for (const ge::InDataAnchorPtr &dst_in_data_anchor : var_out_data_anchor->GetPeerInDataAnchors()) {
      ge::NodePtr dst_node = dst_in_data_anchor->GetOwnerNode();
      if (dst_node->GetType() == HCOMBROADCAST || dst_node->GetType() == HVDCALLBACKBROADCAST) {
        GE_CHK_STATUS_RET(DealBroadCastNode(graph_id, dst_node, dst_in_data_anchor, node, session_id));
        continue;
      }
      if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) ||
          (dst_node->GetType() == ASSIGNSUB)) {
        if (dst_in_data_anchor == dst_node->GetInDataAnchor(0)) {
          GE_CHK_STATUS_RET(DealExportVariableNode(dst_node, node, session_id));
        }
      }
      auto dst_type = dst_node->GetType();
      bool is_trans_node =
          (dst_type == TRANSDATA) || (dst_type == CAST) || (dst_type == TRANSPOSE) || (dst_type == PERMUTE);
      if (is_trans_node) {
        NodePtr final_trans_node = GetFinalTransNode(dst_node);
        GE_CHK_STATUS_RET(DealTransNode(final_trans_node));
      }
    }
  }
  return SUCCESS;
}

// Follow a chain of trans ops (TransData/Cast/Transpose/Permute) to its last node.
ge::NodePtr VarMemAssignUtil::GetFinalTransNode(const ge::NodePtr &trans_node) {
  NodePtr final_ref_node = trans_node;
  OutDataAnchorPtr trans_out_data_anchor = trans_node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(trans_out_data_anchor == nullptr, return final_ref_node);
  for (const auto &dst_in_anchor : trans_out_data_anchor->GetPeerInDataAnchors()) {
    NodePtr dst_node = dst_in_anchor->GetOwnerNode();
    auto dst_type = dst_node->GetType();
    bool is_trans_node =
        (dst_type == TRANSDATA) || (dst_type == CAST) || (dst_type == TRANSPOSE) || (dst_type == PERMUTE);
    if (is_trans_node && (dst_in_anchor->GetIdx() == 0)) {
      final_ref_node = GetFinalTransNode(dst_node);
    }
  }
  GELOGI("Final writable node is %s", final_ref_node->GetName().c_str());
  return final_ref_node;
}

Status VarMemAssignUtil::DealTransNode(const ge::NodePtr &final_trans_node) {
  ge::OutDataAnchorPtr final_trans_out_anchor = final_trans_node->GetOutDataAnchor(0);
  GE_IF_BOOL_EXEC(final_trans_out_anchor == nullptr, return SUCCESS);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : final_trans_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      GE_CHK_STATUS_RET(DealExportTransNode(dst_node, final_trans_node));
    }
  }
  return SUCCESS;
}

Status VarMemAssignUtil::DealExportTransNode(const ge::NodePtr &node, const ge::NodePtr &final_trans_node) {
  ge::OutDataAnchorPtr node_out_anchor = node->GetOutDataAnchor(0);
  GE_CHECK_NOTNULL(node_out_anchor);
  for (const ge::InDataAnchorPtr &dst_in_var_anchor : node_out_anchor->GetPeerInDataAnchors()) {
    ge::NodePtr dst_node = dst_in_var_anchor->GetOwnerNode();
    if ((dst_node->GetType() == ASSIGN) || (dst_node->GetType() == ASSIGNADD) || (dst_node->GetType() == ASSIGNSUB)) {
      GE_CHK_STATUS_RET(DealExportTransNode(dst_node, final_trans_node));
    }
  }
  GE_CHK_STATUS_RET(SetOutTransNodeToAssign(node, final_trans_node, 0));
  return SUCCESS;
}

// Copy the final trans node's output offset into the assign node's output-offset list.
Status VarMemAssignUtil::SetOutTransNodeToAssign(const ge::NodePtr &node, const ge::NodePtr &final_trans_node,
                                                 size_t index) {
  GE_CHECK_NOTNULL(node->GetOpDesc());
  GE_CHECK_NOTNULL(final_trans_node->GetOpDesc());
  // get final_trans_node outputOffset
  vector<int64_t> final_trans_output_list = final_trans_node->GetOpDesc()->GetOutputOffset();
  GE_CHECK_SIZE(final_trans_output_list.size());
  // get assign_node outputOffset
  vector<int64_t> output_list = node->GetOpDesc()->GetOutputOffset();
  auto out_list_size = output_list.size();
  GE_CHECK_SIZE(out_list_size);
  GE_CHK_BOOL_RET_STATUS(index < out_list_size, FAILED, "index %zu >= output_list.size() %zu", index, out_list_size);
  // final_trans_node outputOffset[0] to assign_node outputOffset[0]
  GELOGI("final_trans_node outputOffset[0] is: %ld", final_trans_output_list[0]);
  output_list[index] = final_trans_output_list[0];
  GELOGI("Assign node outputOffset[0] is: %ld", output_list[index]);
  node->GetOpDesc()->SetOutputOffset(output_list);
  return SUCCESS;
}

// For every node output carrying REF_VAR_SRC_VAR_NAME, bind that output to the referenced
// variable's memory.
Status VarMemAssignUtil::AssignMemory2HasRefAttrNode(ge::ComputeGraphPtr &compute_graph) {
  for (const ge::NodePtr &n : compute_graph->GetAllNodes()) {
    string ref_var_src_var_name;
    auto op_desc = n->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx += 1) {
      const auto out_desc = op_desc->MutableOutputDesc(idx);
      if (ge::AttrUtils::GetStr(out_desc, REF_VAR_SRC_VAR_NAME, ref_var_src_var_name)) {
        GE_CHK_STATUS_RET(AssignData2VarRef(n, ref_var_src_var_name, compute_graph->GetSessionID(), idx));
      }
    }
  }
  return SUCCESS;
}

// Look up the source variable in the root graph (or any subgraph), fetch its device address
// and write it into the referencing node's output offset.
Status VarMemAssignUtil::AssignData2VarRef(const ge::NodePtr &has_ref_attr_node, const string &src_var_name,
                                           uint64_t session_id, uint32_t out_index) {
  // Get ref_var_src_var address
  auto root_graph = GraphUtils::FindRootGraph(has_ref_attr_node->GetOwnerComputeGraph());
  GE_CHECK_NOTNULL(root_graph);
  ge::NodePtr var_ref_src_var = root_graph->FindNode(src_var_name);
  if (var_ref_src_var == nullptr) {
    for (auto sub_graph : root_graph->GetAllSubgraphs()) {
      auto node_ptr = sub_graph->FindNode(src_var_name);
      if (node_ptr != nullptr) {
        var_ref_src_var = node_ptr;
        break;
      }
    }
  }
  GE_IF_BOOL_EXEC(var_ref_src_var == nullptr || var_ref_src_var->GetOpDesc() == nullptr, return FAILED);
  GeTensorDesc src_tensor_desc = var_ref_src_var->GetOpDesc()->GetOutputDesc(0);
  uint8_t *dev_ptr = nullptr;
  GE_CHK_STATUS_RET(VarManager::Instance(session_id)->GetVarAddr(src_var_name, src_tensor_desc, &dev_ptr));
  GE_CHECK_NOTNULL(has_ref_attr_node->GetOpDesc());
  vector<int64_t> ref_attr_node_output_list = has_ref_attr_node->GetOpDesc()->GetOutputOffset();
  GE_CHECK_SIZE(ref_attr_node_output_list.size());
  GE_CHK_BOOL_RET_STATUS(out_index < ref_attr_node_output_list.size(), FAILED,
                         "out_index %u >= ref_attr_node_output_list.size() %zu", out_index,
                         ref_attr_node_output_list.size());
  ref_attr_node_output_list[out_index] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(dev_ptr));
  has_ref_attr_node->GetOpDesc()->SetOutputOffset(ref_attr_node_output_list);
  GELOGI("Refresh address successfully, ref node: [%s], addr: [%ld]", has_ref_attr_node->GetName().c_str(),
         ref_attr_node_output_list[out_index]);
  return SUCCESS;
}
}  // namespace ge
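
A pattern that recurs throughout this file (AssignStaticMemory2Node, SetOutVariableAttr, AssignData2VarRef) is: fetch a variable's device address from VarManager, cast it to a signed 64-bit value, and record it in the node's output-offset list. The standalone sketch below illustrates only that pointer/offset round trip with plain C++ types; FakeVarManager and the offsets vector are hypothetical stand-ins for illustration, not part of the GE API.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the address lookup done via VarManager in GE.
struct FakeVarManager {
  uint8_t *GetVarAddr() {
    static uint8_t buffer[16];  // pretend device memory
    return buffer;
  }
};

int main() {
  FakeVarManager var_manager;
  std::vector<int64_t> output_offsets(1, 0);  // one output, as for a VARIABLE node

  // Store the "device" address as a signed 64-bit offset, mirroring the casts in this file.
  uint8_t *dev_ptr = var_manager.GetVarAddr();
  output_offsets[0] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(dev_ptr));

  // Recover the pointer from the recorded offset via the reverse cast.
  auto *recovered = reinterpret_cast<uint8_t *>(static_cast<uintptr_t>(output_offsets[0]));
  std::cout << std::boolalpha << (recovered == dev_ptr) << std::endl;  // prints: true
  return 0;
}

In GE itself the recorded offsets are not cast back in this file; they are carried on the OpDesc and consumed by later build and load stages.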

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and serves as the bridge between them. GE takes the graph issued by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.