You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

merge_input_memcpy_pass.cc 5.7 kB

4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/merge_input_memcpy_pass.h"
  17. #include "common/ge/ge_util.h"
  18. #include "external/ge/ge_api_types.h"
  19. #include "common/omg_util.h"
  20. namespace ge {
  21. Status MergeInputMemcpyPass::Run(ComputeGraphPtr graph) {
  22. GELOGD("MergeInputMemcpyPass Enter");
  23. std::unordered_map<NodePtr, std::vector<NodePtr>> switch_groups;
  24. for (const auto &node : graph->GetDirectNode()) {
  25. std::string type;
  26. GE_CHK_STATUS_RET(GetOriginalType(node, type),
  27. "[Get][OriginalType] of node in graph:%s failed.", graph->GetName().c_str());
  28. if ((type != MERGE) && (type != REFMERGE)) {
  29. continue;
  30. }
  31. GE_CHECK_NOTNULL(node->GetOpDesc());
  32. GE_CHK_STATUS_RET(AddMemcpyAsyncNodes(graph, node, node->GetOpDesc()->HasAttr(ATTR_INSERT_BY_MBATCH)),
  33. "[Add][MemcpyAsyncNodes] failed, graph:%s, node:%s.", graph->GetName().c_str(),
  34. node->GetName().c_str());
  35. }
  36. GELOGD("MergeInputMemcpyPass Leave");
  37. return SUCCESS;
  38. }
  39. ///
  40. /// @brief Add MemcpyAsync Op as Merge in_node
  41. /// @param [in] graph
  42. /// @param [in] node
  43. /// @param [in] multi_batch_flag
  44. /// @return Status
  45. ///
  46. Status MergeInputMemcpyPass::AddMemcpyAsyncNodes(const ComputeGraphPtr &graph, const NodePtr &node,
  47. bool multi_batch_flag) {
  48. for (const InDataAnchorPtr &in_data_anchor : node->GetAllInDataAnchors()) {
  49. OutDataAnchorPtr peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
  50. GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
  51. NodePtr in_node = peer_out_anchor->GetOwnerNode();
  52. const std::string &type = in_node->GetType();
  53. // For WhileLoop no need memcpy for merge.
  54. GE_IF_BOOL_EXEC((type == ENTER) || (type == REFENTER) || (type == NEXTITERATION) || (type == REFNEXTITERATION),
  55. continue);
  56. const std::string &memcpy_name = node->GetName() + "_input_" + std::to_string(in_data_anchor->GetIdx());
  57. NodePtr memcpy_node = CreateMemcpyAsyncNode(graph, memcpy_name, peer_out_anchor, multi_batch_flag);
  58. GE_CHK_BOOL_EXEC(memcpy_node != nullptr, return FAILED,
  59. "[Create][MemcpyAsyncNode] failed, memcpy_name:%s.", memcpy_name.c_str());
  60. GE_CHK_STATUS(GraphUtils::RemoveEdge(peer_out_anchor, in_data_anchor),
  61. "[Remove][Edge] between %s and %s failed.", peer_out_anchor->GetOwnerNode()->GetName().c_str(),
  62. node->GetName().c_str());
  63. GE_CHK_STATUS(GraphUtils::AddEdge(peer_out_anchor, memcpy_node->GetInDataAnchor(0)),
  64. "[Add][Edge] between %s and %s failed.", peer_out_anchor->GetOwnerNode()->GetName().c_str(),
  65. memcpy_node->GetName().c_str());
  66. GE_CHK_STATUS(GraphUtils::AddEdge(memcpy_node->GetOutDataAnchor(0), in_data_anchor),
  67. "[Add][Edge] between %s and %s failed.", memcpy_node->GetName().c_str(),
  68. node->GetName().c_str());
  69. }
  70. return SUCCESS;
  71. }
  72. ///
  73. /// @brief Add MemcpyAsync Node
  74. /// @param [in] graph
  75. /// @param [in] name
  76. /// @param [in] out_data_anchor
  77. /// @param [in] multi_batch_flag
  78. /// @return ge::NodePtr
  79. ///
  80. NodePtr MergeInputMemcpyPass::CreateMemcpyAsyncNode(const ComputeGraphPtr &graph, const std::string &name,
  81. const OutDataAnchorPtr &out_data_anchor, bool multi_batch_flag) {
  82. OpDescPtr pre_op_desc = out_data_anchor->GetOwnerNode()->GetOpDesc();
  83. GE_CHK_BOOL_EXEC(pre_op_desc != nullptr,
  84. REPORT_INNER_ERROR("E19999", "opdesc of pre node is nullptr, check invalid");
  85. return nullptr, "[Get][OpDesc] failed, OpDesc of pre node is invalid.");
  86. const std::string &memcpy_type = multi_batch_flag ? MEMCPYADDRASYNC : MEMCPYASYNC;
  87. const std::string &node_name = name + "_" + memcpy_type;
  88. GELOGI("Create MemcpyAsync op:%s.", node_name.c_str());
  89. OpDescPtr op_desc = MakeShared<OpDesc>(node_name, memcpy_type);
  90. if (op_desc == nullptr) {
  91. REPORT_CALL_ERROR("E19999", "Create OpDesc failed, node_name:%s", node_name.c_str());
  92. GELOGE(FAILED, "[Create][OpDesc] failed, MemcpyAsync:%s.", node_name.c_str());
  93. return nullptr;
  94. }
  95. GE_CHK_BOOL_EXEC(op_desc->AddInputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS,
  96. REPORT_CALL_ERROR("E19999", "Add input to op:%s(%s) failed",
  97. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  98. return nullptr,
  99. "[Add][InputDesc] to op:%s(%s) failed", op_desc->GetName().c_str(), op_desc->GetType().c_str());
  100. GE_CHK_BOOL_EXEC(op_desc->AddOutputDesc(pre_op_desc->GetOutputDesc(out_data_anchor->GetIdx())) == GRAPH_SUCCESS,
  101. REPORT_CALL_ERROR("E19999", "Add output to op:%s(%s) failed",
  102. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  103. return nullptr,
  104. "[Add][OutputDesc] to op:%s(%s) failed", op_desc->GetName().c_str(), op_desc->GetType().c_str());
  105. return graph->AddNode(op_desc);
  106. }
  107. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示