
merge_pass.cc 8.5 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "graph/passes/merge_pass.h"

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "framework/common/debug/ge_log.h"
#include "common/ge_inner_error_codes.h"
#include "common/ge/ge_util.h"
#include "graph/common/omg_util.h"
#include "graph/debug/ge_attr_define.h"
#include "graph/utils/graph_utils.h"
#include "graph/passes/pass_utils.h"
using domi::PARAM_INVALID;
using domi::SUCCESS;

namespace ge {
const int kValueIndexOutputIndex = 1;
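
// Returns true if any dimension of the shape is 0, i.e. the tensor carries no data.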
bool IsEmptyTensor(const GeShape &shape) {
  const auto &dims = shape.GetDims();
  return std::any_of(dims.begin(), dims.end(), [](int64_t dim) { return dim == 0; });
}
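
// Entry point of the pass: ignores nodes that are not Merge, removes inputs that
// produce empty tensors, then handles the node by its remaining input count:
//   0 inputs  -> the downstream branch is dead and is removed (Case A);
//   1 input   -> the Merge node is bypassed and, if consumed, its value_index
//                output is folded into a Constant (Case B);
//   >1 inputs -> nothing can be optimized (Case C).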
Status MergePass::Run(NodePtr &node) {
  GELOGD("MergePass running");
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "param [node] must not be null.");
    return PARAM_INVALID;
  }

  std::string op_type;
  GE_CHK_STATUS_RET(GetOriginalType(node, op_type), "get original type failed");
  if (op_type != MERGE) {
    return SUCCESS;
  }

  auto out_data_anchors = node->GetAllOutDataAnchors();
  if (out_data_anchors.empty()) {
    GELOGE(PARAM_INVALID, "[%s] Merge node output anchor is empty", node->GetName().c_str());
    return PARAM_INVALID;
  }

  if (OptimizeEmptyTensorInput(node) != SUCCESS) {
    GELOGE(FAILED, "[%s] remove empty_tensor inputs failed.", node->GetName().c_str());
    return FAILED;
  }

  auto in_data_nodes = node->GetInDataNodes();
  switch (in_data_nodes.size()) {
    case 0: {
      /// Case A: input_count = 0, the output of the Merge node is inactive as well.
      /// In this case the output branch can be removed
      /// until another Merge node is met.
      std::vector<NodePtr> del_nodes;
      std::vector<NodePtr> end_nodes;
      Status ret = PassUtils::RemoveBranch(node, del_nodes, end_nodes);
      for (auto &end_node : end_nodes) {
        AddRePassNode(end_node);
      }
      for (const auto &delete_node : del_nodes) {
        AddNodeDeleted(delete_node);
      }
      return ret;
    }
    case 1: {  // Case B: input_count = 1, the Merge node can be optimized out
      std::vector<int> merge_io_map = {PassUtils::GetUniqueInDataAnchorIndex(node), -1};
      if (merge_io_map[0] != -1 && IsNeedChangeIndexToConstant(node)) {
        int index = merge_io_map[0];
        if (ChangeIndexToConstant(node, index) != SUCCESS) {
          GELOGE(FAILED, "[%s] Change value index to be Constant failed.", node->GetName().c_str());
          return FAILED;
        }
      }
      auto in_node = in_data_nodes.at(0);
      if (IsMergeInputNeedOptimized(in_node)) {
        if (IsolateAndDeleteNode(in_node, {0}) != SUCCESS) {
          GELOGE(FAILED, "Isolate and delete node %s failed.", in_node->GetName().c_str());
          return FAILED;
        }
      }
      return IsolateAndDeleteNode(node, merge_io_map);
    }
    default: {
      // Case C: input_count > 1, the Merge node cannot be optimized
      return SUCCESS;
    }
  }
}
bool MergePass::IsNeedChangeIndexToConstant(NodePtr &node) const {
  /// value_index is output 1 of the Merge node.
  /// If value_index is linked to another node, it has to be changed into a Constant.
  GE_IF_BOOL_EXEC(node == nullptr, GELOGW("Node is nullptr"); return false);
  auto out_anchor = node->GetOutDataAnchor(kValueIndexOutputIndex);
  GE_IF_BOOL_EXEC(out_anchor == nullptr, GELOGW("Out_anchor is nullptr"); return false);
  for (const auto &peer_in_anchor : out_anchor->GetPeerInDataAnchors()) {
    if (peer_in_anchor != nullptr && peer_in_anchor->GetOwnerNode() != nullptr) {
      GELOGI(
          "[%s] MergePass, value_index links to another node, "
          "change it to a Constant.",
          node->GetName().c_str());
      return true;
    }
  }
  return false;
}
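
// Materializes the value_index output of the Merge node as a Constant node holding
// the index of the only active input, rewires its consumers to the Constant and
// keeps a control edge from the Merge node so execution order is preserved.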
Status MergePass::ChangeIndexToConstant(NodePtr &node, int &value_index) {
  GE_CHECK_NOTNULL(node);
  ComputeGraphPtr graph = node->GetOwnerComputeGraph();
  if (graph == nullptr) {
    GELOGE(FAILED, "[%s] The owner graph must not be null.", node->GetName().c_str());
    return FAILED;
  }

  OpDescPtr constant_op_desc = nullptr;
  if (CreateConstByValue(node, value_index, constant_op_desc) != SUCCESS) {
    return FAILED;
  }
  NodePtr const_node = graph->AddNode(constant_op_desc);
  if (const_node == nullptr) {
    return FAILED;
  }

  // Change peer in anchors from value_index to the new Constant node
  if (GraphUtils::ReplaceNodeAnchors(const_node, node, {}, {1}) != GRAPH_SUCCESS) {
    GELOGE(FAILED, "[%s] ReplaceNodeAnchors failed.", node->GetName().c_str());
    return FAILED;
  }

  auto out_control_anchor = node->GetOutControlAnchor();
  GE_CHECK_NOTNULL(out_control_anchor);
  // Add a control edge between the Merge node and the Constant
  if (out_control_anchor->LinkTo(const_node->GetInControlAnchor()) != GRAPH_SUCCESS) {
    return FAILED;
  }
  return SUCCESS;
}
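
// Builds the OpDesc of the Constant that replaces value_index: it reuses the tensor
// desc of Merge output 1 (forced to DT_INT32) and stores value_index as its weight.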
Status MergePass::CreateConstByValue(NodePtr &node, int value_index, OpDescPtr &op_desc) {
  std::string constant_name = node->GetName() + "_value_index";

  // 1. Create the Constant OpDesc
  op_desc = MakeShared<OpDesc>(constant_name, CONSTANT);
  if (op_desc == nullptr) {
    GELOGE(FAILED, "[%s] Make shared of Constant op desc failed.", constant_name.c_str());
    return FAILED;
  }

  // 2. Get the output desc of output 1 of the Merge node (value_index)
  OpDescPtr original_op_desc = node->GetOpDesc();
  if (original_op_desc == nullptr) {
    GELOGE(FAILED, "[%s] Op desc must not be null.", constant_name.c_str());
    return FAILED;
  }
  GeTensorDesc original_out_tensor_desc = original_op_desc->GetOutputDesc(1);
  original_out_tensor_desc.SetDataType(DT_INT32);

  // 3. Create the attribute value of the Constant, a tensor holding value_index
  GeTensorPtr const_tensor_ptr =
      MakeShared<GeTensor>(original_out_tensor_desc, reinterpret_cast<uint8_t *>(&value_index), sizeof(int));
  if (const_tensor_ptr == nullptr) {
    GELOGE(FAILED, "[%s] Make shared of Constant tensor failed.", constant_name.c_str());
    return FAILED;
  }
  GE_IF_BOOL_EXEC(!AttrUtils::SetTensor(op_desc, ATTR_NAME_WEIGHTS, const_tensor_ptr),
                  GELOGE(FAILED, "set ATTR_NAME_WEIGHTS failed"); return FAILED);

  // 4. Set the Constant output desc
  GE_CHK_STATUS_RET(op_desc->AddOutputDesc(original_out_tensor_desc), "add output desc failed");
  return SUCCESS;
}
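
// Decides whether the single input node of the Merge node (a memcpy node inserted
// by MergeInputMemcpyPass) can be deleted together with the Merge node.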
bool MergePass::IsMergeInputNeedOptimized(NodePtr &node) const {
  if (node == nullptr) {
    return false;
  }
  // node is not inserted by MergeInputMemcpyPass
  if ((node->GetType() != MEMCPYASYNC) && (node->GetType() != MEMCPYADDRASYNC)) {
    return false;
  }
  if (node->GetInDataNodes().size() != 1) {
    return false;
  }

  auto in_node = node->GetInDataNodes().at(0);
  if (in_node == nullptr) {
    return false;
  }
  // in_node may be the global_step var
  if ((in_node->GetType() == VARIABLE) || (in_node->GetType() == VARIABLEV2)) {
    return false;
  }
  return true;
}
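
// Drops every data edge whose source produces an empty tensor (a shape with a zero
// dimension), so such inputs no longer count when the Merge node is optimized.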
Status MergePass::OptimizeEmptyTensorInput(const NodePtr &node) {
  for (const auto &in_data_anchor : node->GetAllInDataAnchors()) {
    const auto &peer_data_anchor = in_data_anchor->GetPeerOutAnchor();
    if (peer_data_anchor == nullptr) {
      continue;
    }
    if ((peer_data_anchor->GetOwnerNode() == nullptr) ||
        (peer_data_anchor->GetOwnerNode()->GetOpDesc() == nullptr)) {
      continue;
    }
    const auto &op_desc = peer_data_anchor->GetOwnerNode()->GetOpDesc();
    if (IsEmptyTensor(op_desc->GetOutputDesc(peer_data_anchor->GetIdx()).GetShape())) {
      if (GraphUtils::RemoveEdge(peer_data_anchor, in_data_anchor) != GRAPH_SUCCESS) {
        GELOGE(FAILED, "Remove data edge %s:%d->%s:%d failed.",
               op_desc->GetName().c_str(), peer_data_anchor->GetIdx(),
               node->GetName().c_str(), in_data_anchor->GetIdx());
        return FAILED;
      }
      GELOGD("Remove data edge %s:%d->%s:%d",
             op_desc->GetName().c_str(), peer_data_anchor->GetIdx(),
             node->GetName().c_str(), in_data_anchor->GetIdx());
    }
  }
  return SUCCESS;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.