You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

compile_nodes_pass.cc 6.9 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/compile_nodes_pass.h"
  17. #include <utility>
  18. #include <vector>
  19. #include "common/ge/ge_util.h"
  20. #include "common/ge_inner_error_codes.h"
  21. #include "framework/common/debug/ge_log.h"
  22. #include "graph/debug/ge_attr_define.h"
  23. #include "graph/common/ge_call_wrapper.h"
  24. #include "graph/op_desc.h"
using domi::ImplyType;
namespace {
// Fallback engine and kernel-library names: when a node's assigned kernel
// fails the accuracy-support check, GetSupportedKernel retries against this
// AICPU kernel library and CompileNodes tags such nodes with this engine.
const char *const kAICPUEngineName = "DNN_VM_AICPU";
const char *const kAICPUKernelLibName = "aicpu_tf_kernel";
}  // namespace
  30. namespace ge {
  31. graphStatus CompileNodesPass::Run(ComputeGraphPtr graph) {
  32. GE_TIMESTAMP_START(CompileNodesPass);
  33. GELOGI("[CompileNodesPass]: optimize begin.");
  34. if (graph == nullptr) {
  35. return GRAPH_SUCCESS;
  36. }
  37. std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  38. if (instance == nullptr || !instance->InitFlag()) {
  39. GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "Run CompileNodesPass failed.");
  40. return ge::GE_CLI_GE_NOT_INITIALIZED;
  41. }
  42. std::unordered_map<string, vector<NodePtr>> kernel_to_compile_nodes;
  43. for (auto &node : graph->GetDirectNode()) {
  44. if (node == nullptr) {
  45. continue;
  46. }
  47. auto op_desc = node->GetOpDesc();
  48. if (op_desc == nullptr) {
  49. continue;
  50. }
  51. auto node_need_compile = false;
  52. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NEED_COMPILE, node_need_compile);
  53. if (!node_need_compile) {
  54. continue;
  55. }
  56. // collect all supported compile node
  57. string kernel_lib_name;
  58. auto ret = GetSupportedKernel(node, instance, kernel_lib_name);
  59. if (ret == GRAPH_SUCCESS) {
  60. auto iter = kernel_to_compile_nodes.find(kernel_lib_name);
  61. if (iter != kernel_to_compile_nodes.end()) {
  62. iter->second.emplace_back(node);
  63. } else {
  64. std::vector<NodePtr> node_vec{node};
  65. kernel_to_compile_nodes.insert(std::make_pair(kernel_lib_name, node_vec));
  66. }
  67. } else {
  68. GELOGE(GRAPH_FAILED, "Get node:%s, type:%s supported kernel failed.", node->GetName().c_str(),
  69. node->GetType().c_str());
  70. return GRAPH_FAILED;
  71. }
  72. }
  73. // compile node follow different kernel, currently only TBE kernel
  74. auto result = CompileNodes(instance, kernel_to_compile_nodes);
  75. if (result != GRAPH_SUCCESS) {
  76. GELOGE(result, "Compile op failed.");
  77. return result;
  78. }
  79. GELOGI("[CompileNodesPass]: Optimize success.");
  80. GE_TIMESTAMP_EVENT_END(CompileNodesPass, "OptimizeStage2::ControlAttrOptimize::CompileNodesPass");
  81. return GRAPH_SUCCESS;
  82. }
  83. graphStatus CompileNodesPass::GetSupportedKernel(const NodePtr &node, const std::shared_ptr<GELib> instance,
  84. string &kernel_lib_name) {
  85. auto op_desc = node->GetOpDesc();
  86. if (op_desc == nullptr) {
  87. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s opdesc failed", node->GetName().c_str());
  88. return ge::GE_GRAPH_PARAM_NULLPTR;
  89. }
  90. // reset op kernel lib, find supported kernel
  91. kernel_lib_name = op_desc->GetOpKernelLibName();
  92. if (kernel_lib_name.empty()) {
  93. (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
  94. kernel_lib_name = op_desc->GetOpKernelLibName();
  95. if (kernel_lib_name.empty()) {
  96. GELOGE(GRAPH_FAILED, "Get node:%s, type:%s kernel lib failed.", node->GetName().c_str(),
  97. op_desc->GetType().c_str());
  98. return GRAPH_FAILED;
  99. }
  100. }
  101. OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
  102. if (kernel_info == nullptr) {
  103. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", node->GetName().c_str());
  104. return ge::GE_GRAPH_PARAM_NULLPTR;
  105. }
  106. // begin accuracy supported check
  107. if (!CheckAccuracySupport(kernel_info, instance, op_desc)) {
  108. // if check accuracy support failed , try to go to aicpu engine
  109. string aicpu_kernel_lib_name = kAICPUKernelLibName;
  110. OpsKernelInfoStorePtr aicpu_kernel_info =
  111. instance->OpsKernelManagerObj().GetOpsKernelInfoStore(aicpu_kernel_lib_name);
  112. if (!CheckAccuracySupport(aicpu_kernel_info, instance, op_desc)) {
  113. GELOGE(GRAPH_FAILED, "AICPU engine does not support node:%s, type:%s , get kernel lib failed.",
  114. node->GetName().c_str(), op_desc->GetType().c_str());
  115. return GRAPH_FAILED;
  116. }
  117. kernel_lib_name = kAICPUKernelLibName;
  118. }
  119. return GRAPH_SUCCESS;
  120. }
  121. bool CompileNodesPass::CheckAccuracySupport(const OpsKernelInfoStorePtr &kernel_info,
  122. const std::shared_ptr<GELib> instance, OpDescPtr &op_desc) {
  123. auto ge_desc = MakeShared<ge::OpDescPtr>(op_desc);
  124. if (ge_desc == nullptr) {
  125. GELOGE(GE_GRAPH_MEMORY_ALLOC_FAILED, "Fail to malloc op desc.");
  126. return false;
  127. }
  128. string reason;
  129. if (!(kernel_info->CheckAccuracySupported(*ge_desc, reason, true))) {
  130. GELOGW("Check Accuracy Supported return not support, node name is %s, reason: %s. Try to go to AICPU engine.",
  131. op_desc->GetName().c_str(), reason.c_str());
  132. return false;
  133. }
  134. return true;
  135. }
  136. graphStatus CompileNodesPass::CompileNodes(const std::shared_ptr<GELib> instance,
  137. std::unordered_map<string, vector<NodePtr>> &kernel_to_compile_nodes) {
  138. // compile nodes, if kernel is aicpu, check support and set engine info.
  139. OpsKernelInfoStorePtr kernel_info;
  140. for (auto &kernel_nodes : kernel_to_compile_nodes) {
  141. kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_nodes.first);
  142. if (kernel_info == nullptr) {
  143. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "Get op %s ops kernel info store failed", kernel_nodes.first.c_str());
  144. return ge::GE_GRAPH_PARAM_NULLPTR;
  145. }
  146. string reason;
  147. if (kernel_nodes.first == kAICPUKernelLibName) {
  148. for (auto node : kernel_nodes.second) {
  149. // this node will go to aicpu engine ,no need compile
  150. node->GetOpDesc()->SetOpEngineName(kAICPUEngineName);
  151. node->GetOpDesc()->SetOpKernelLibName(kAICPUKernelLibName);
  152. AttrUtils::SetInt(node->GetOpDesc(), ATTR_NAME_IMPLY_TYPE, static_cast<int64_t>(ImplyType::AI_CPU));
  153. }
  154. continue;
  155. }
  156. auto ret = kernel_info->CompileOp(kernel_nodes.second);
  157. if (ret != GRAPH_SUCCESS) {
  158. GELOGE(ret, "Compile op failed, kernel name is %s", kernel_nodes.first.c_str());
  159. return GRAPH_FAILED;
  160. }
  161. }
  162. return GRAPH_SUCCESS;
  163. }
  164. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示