You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

compile_nodes_pass.cc 7.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/compile_nodes_pass.h"
  17. #include <utility>
  18. #include <vector>
  19. #include "common/ge/ge_util.h"
  20. #include "common/ge_inner_error_codes.h"
  21. #include "framework/common/debug/ge_log.h"
  22. #include "graph/debug/ge_attr_define.h"
  23. #include "graph/common/ge_call_wrapper.h"
  24. #include "graph/op_desc.h"
  25. using domi::ImplyType;
  26. namespace {
  27. const char *const kAICPUEngineName = "DNN_VM_AICPU";
  28. const char *const kAICPUKernelLibName = "aicpu_tf_kernel";
  29. } // namespace
  30. namespace ge {
  31. graphStatus CompileNodesPass::Run(ComputeGraphPtr graph) {
  32. GE_TIMESTAMP_START(CompileNodesPass);
  33. GELOGD("[CompileNodesPass]: optimize begin.");
  34. if (graph == nullptr) {
  35. return GRAPH_SUCCESS;
  36. }
  37. std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  38. if (instance == nullptr || !instance->InitFlag()) {
  39. GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Run CompileNodesPass failed.");
  40. return ge::GE_CLI_GE_NOT_INITIALIZED;
  41. }
  42. std::unordered_map<string, vector<NodePtr>> kernel_to_compile_nodes;
  43. for (auto &node : graph->GetDirectNode()) {
  44. if (node == nullptr) {
  45. continue;
  46. }
  47. auto op_desc = node->GetOpDesc();
  48. if (op_desc == nullptr) {
  49. continue;
  50. }
  51. auto node_need_compile = false;
  52. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NEED_COMPILE, node_need_compile);
  53. if (!node_need_compile) {
  54. continue;
  55. }
  56. // collect all supported compile node
  57. string kernel_lib_name;
  58. auto ret = GetSupportedKernel(node, instance, kernel_lib_name);
  59. if (ret == GRAPH_SUCCESS) {
  60. auto iter = kernel_to_compile_nodes.find(kernel_lib_name);
  61. if (iter != kernel_to_compile_nodes.end()) {
  62. iter->second.emplace_back(node);
  63. } else {
  64. std::vector<NodePtr> node_vec{node};
  65. kernel_to_compile_nodes.insert(std::make_pair(kernel_lib_name, node_vec));
  66. }
  67. } else {
  68. GELOGE(GRAPH_FAILED, "Get node:%s, type:%s supported kernel failed.", node->GetName().c_str(),
  69. node->GetType().c_str());
  70. return GRAPH_FAILED;
  71. }
  72. }
  73. // compile node follow different kernel, currently only TBE kernel
  74. auto result = CompileNodes(instance, kernel_to_compile_nodes);
  75. if (result != GRAPH_SUCCESS) {
  76. GELOGE(result, "Compile op failed.");
  77. return result;
  78. }
  79. GELOGD("[CompileNodesPass]: Optimize success.");
  80. GE_TIMESTAMP_EVENT_END(CompileNodesPass, "OptimizeStage2::ControlAttrOptimize::CompileNodesPass");
  81. return GRAPH_SUCCESS;
  82. }
  83. graphStatus CompileNodesPass::GetSupportedKernel(const NodePtr &node, const std::shared_ptr<GELib> instance,
  84. string &kernel_lib_name) {
  85. auto op_desc = node->GetOpDesc();
  86. if (op_desc == nullptr) {
  87. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s opdesc failed", node->GetName().c_str());
  88. return ge::GE_GRAPH_PARAM_NULLPTR;
  89. }
  90. // reset op kernel lib, find supported kernel
  91. kernel_lib_name = op_desc->GetOpKernelLibName();
  92. if (kernel_lib_name.empty()) {
  93. (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
  94. kernel_lib_name = op_desc->GetOpKernelLibName();
  95. if (kernel_lib_name.empty()) {
  96. GELOGE(GRAPH_FAILED, "Get node:%s, type:%s kernel lib failed.", node->GetName().c_str(),
  97. op_desc->GetType().c_str());
  98. return GRAPH_FAILED;
  99. }
  100. }
  101. OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
  102. if (kernel_info == nullptr) {
  103. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
  104. return ge::GE_GRAPH_PARAM_NULLPTR;
  105. }
  106. // begin accuracy supported check
  107. if (!CheckAccuracySupport(kernel_info, instance, op_desc)) {
  108. // if check accuracy support failed , try to go to other engine.
  109. GELOGD("Check Accuracy Supported return not support, node name is %s. Try to go to other engine.",
  110. op_desc->GetName().c_str());
  111. string kernel_name_origin = kernel_lib_name;
  112. OpsKernelManager &ops_kernel_manager = instance->OpsKernelManagerObj();
  113. auto kernel_map = ops_kernel_manager.GetAllOpsKernelInfoStores();
  114. for (auto it = kernel_map.begin(); it != kernel_map.end(); ++it) {
  115. string tmp_kernel_name = it->first;
  116. if (tmp_kernel_name == kernel_name_origin) {
  117. continue;
  118. }
  119. OpsKernelInfoStorePtr tmp_kernel_info = it->second;
  120. if (CheckAccuracySupport(tmp_kernel_info, instance, op_desc)) {
  121. kernel_lib_name = tmp_kernel_name;
  122. GELOGD("Find kernel lib %s support node:%s, type:%s , get kernel lib success.", tmp_kernel_name.c_str(),
  123. node->GetName().c_str(), op_desc->GetType().c_str());
  124. return GRAPH_SUCCESS;
  125. }
  126. }
  127. GELOGE(GRAPH_FAILED, "Cannot find kernel lib support node:%s, type:%s , get kernel lib failed.",
  128. node->GetName().c_str(), op_desc->GetType().c_str());
  129. return GRAPH_FAILED;
  130. }
  131. return GRAPH_SUCCESS;
  132. }
  133. bool CompileNodesPass::CheckAccuracySupport(const OpsKernelInfoStorePtr &kernel_info,
  134. const std::shared_ptr<GELib> instance, OpDescPtr &op_desc) {
  135. auto ge_desc = MakeShared<ge::OpDescPtr>(op_desc);
  136. if (ge_desc == nullptr) {
  137. GELOGE(GE_GRAPH_MEMORY_ALLOC_FAILED, "Fail to malloc op desc.");
  138. return false;
  139. }
  140. string reason;
  141. if (!(kernel_info->CheckAccuracySupported(*ge_desc, reason, true))) {
  142. return false;
  143. }
  144. return true;
  145. }
  146. graphStatus CompileNodesPass::CompileNodes(const std::shared_ptr<GELib> instance,
  147. std::unordered_map<string, vector<NodePtr>> &kernel_to_compile_nodes) {
  148. // compile nodes, if kernel is aicpu, check support and set engine info.
  149. OpsKernelInfoStorePtr kernel_info;
  150. for (auto &kernel_nodes : kernel_to_compile_nodes) {
  151. kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_nodes.first);
  152. if (kernel_info == nullptr) {
  153. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", kernel_nodes.first.c_str());
  154. return ge::GE_GRAPH_PARAM_NULLPTR;
  155. }
  156. string reason;
  157. if (kernel_nodes.first == kAICPUKernelLibName) {
  158. for (auto node : kernel_nodes.second) {
  159. // this node will go to aicpu engine ,no need compile
  160. node->GetOpDesc()->SetOpEngineName(kAICPUEngineName);
  161. node->GetOpDesc()->SetOpKernelLibName(kAICPUKernelLibName);
  162. AttrUtils::SetInt(node->GetOpDesc(), ATTR_NAME_IMPLY_TYPE, static_cast<int64_t>(ImplyType::AI_CPU));
  163. }
  164. continue;
  165. }
  166. auto ret = kernel_info->CompileOp(kernel_nodes.second);
  167. if (ret != GRAPH_SUCCESS) {
  168. GELOGE(ret, "Compile op failed, kernel name is %s", kernel_nodes.first.c_str());
  169. return GRAPH_FAILED;
  170. }
  171. }
  172. return GRAPH_SUCCESS;
  173. }
  174. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。