You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

compile_nodes_pass.cc 8.8 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/compile_nodes_pass.h"
  17. #include <utility>
  18. #include <vector>
  19. #include "common/ge/ge_util.h"
  20. #include "framework/common/ge_inner_error_codes.h"
  21. #include "framework/common/debug/ge_log.h"
  22. #include "graph/debug/ge_attr_define.h"
  23. #include "common/ge_call_wrapper.h"
  24. #include "graph/op_desc.h"
  25. using domi::ImplyType;
namespace {
// Engine name and op kernel lib name used to tag nodes that are routed to the
// AICPU engine instead of being compiled (see CompileNodesPass::CompileNodes).
const char *const kAICPUEngineName = "DNN_VM_AICPU";
const char *const kAICPUKernelLibName = "aicpu_tf_kernel";
}  // namespace
  30. namespace ge {
  31. graphStatus CompileNodesPass::Run(ComputeGraphPtr graph) {
  32. GE_TIMESTAMP_START(CompileNodesPass);
  33. GELOGD("[CompileNodesPass]: optimize begin.");
  34. if (graph == nullptr) {
  35. return GRAPH_SUCCESS;
  36. }
  37. std::shared_ptr<GELib> instance = ge::GELib::GetInstance();
  38. if (instance == nullptr || !instance->InitFlag()) {
  39. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid");
  40. GELOGE(ge::GE_CLI_GE_NOT_INITIALIZED, "[Check][Param] Gelib not init before.");
  41. return ge::GE_CLI_GE_NOT_INITIALIZED;
  42. }
  43. std::unordered_map<string, vector<NodePtr>> kernel_to_compile_nodes;
  44. for (auto &node : graph->GetDirectNode()) {
  45. if (node == nullptr) {
  46. continue;
  47. }
  48. auto op_desc = node->GetOpDesc();
  49. if (op_desc == nullptr) {
  50. continue;
  51. }
  52. auto node_need_compile = false;
  53. (void)ge::AttrUtils::GetBool(op_desc, ATTR_NEED_COMPILE, node_need_compile);
  54. if (!node_need_compile) {
  55. continue;
  56. }
  57. // collect all supported compile node
  58. string kernel_lib_name;
  59. auto ret = GetSupportedKernel(node, instance, kernel_lib_name);
  60. if (ret == GRAPH_SUCCESS) {
  61. auto iter = kernel_to_compile_nodes.find(kernel_lib_name);
  62. if (iter != kernel_to_compile_nodes.end()) {
  63. iter->second.emplace_back(node);
  64. } else {
  65. std::vector<NodePtr> node_vec{node};
  66. kernel_to_compile_nodes.insert(std::make_pair(kernel_lib_name, node_vec));
  67. }
  68. } else {
  69. GELOGE(GRAPH_FAILED, "[Get][SupportedKernel] for node:%s(%s) failed.", node->GetName().c_str(),
  70. node->GetType().c_str());
  71. return GRAPH_FAILED;
  72. }
  73. }
  74. // compile node follow different kernel, currently only TBE kernel
  75. auto result = CompileNodes(instance, kernel_to_compile_nodes);
  76. if (result != GRAPH_SUCCESS) {
  77. GELOGE(result, "[Compile][Op] failed, ret:%u.", result);
  78. return result;
  79. }
  80. GELOGD("[CompileNodesPass]: Optimize success.");
  81. GE_TIMESTAMP_EVENT_END(CompileNodesPass, "OptimizeStage2::ControlAttrOptimize::CompileNodesPass");
  82. return GRAPH_SUCCESS;
  83. }
/**
 * @brief Find an op kernel lib that supports @p node.
 *        First tries the kernel lib already bound to the op (letting the DNN
 *        engine manager assign one if none is bound yet); if that lib fails
 *        the accuracy-support check, every other registered kernel lib is
 *        tried in turn as a fallback.
 * @param node            Node to query; its op desc must not be null.
 * @param instance        Initialized GELib instance, used to reach the DNN
 *                        engine manager and the ops kernel manager.
 * @param kernel_lib_name Output: name of a kernel lib that supports the node.
 * @return GRAPH_SUCCESS when a supporting kernel lib was found,
 *         GE_GRAPH_PARAM_NULLPTR on null op desc / missing kernel info store,
 *         GRAPH_FAILED when no kernel lib supports the node.
 */
graphStatus CompileNodesPass::GetSupportedKernel(const NodePtr &node, const std::shared_ptr<GELib> instance,
                                                 string &kernel_lib_name) {
  auto op_desc = node->GetOpDesc();
  if (op_desc == nullptr) {
    GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "[Get][OpDesc] failed, op of param node is nullptr.");
    return ge::GE_GRAPH_PARAM_NULLPTR;
  }
  // reset op kernel lib, find supported kernel
  kernel_lib_name = op_desc->GetOpKernelLibName();
  if (kernel_lib_name.empty()) {
    // GetDNNEngineName is called for its side effect: it assigns an engine
    // (and thereby a kernel lib name) to the op desc. The returned name is
    // deliberately ignored and re-read from the op desc below.
    (void)instance->DNNEngineManagerObj().GetDNNEngineName(node);
    kernel_lib_name = op_desc->GetOpKernelLibName();
    if (kernel_lib_name.empty()) {
      REPORT_INNER_ERROR("E19999", "kernel_lib_name in op:%s(%s) is empty, check invalid",
                         op_desc->GetName().c_str(), op_desc->GetType().c_str());
      GELOGE(GRAPH_FAILED, "[Get][OpKernelLib] for node:%s(%s) failed.", node->GetName().c_str(),
             op_desc->GetType().c_str());
      return GRAPH_FAILED;
    }
  }
  OpsKernelInfoStorePtr kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_lib_name);
  if (kernel_info == nullptr) {
    REPORT_INNER_ERROR("E19999", "Find ops kernel by name:%s failed for op:%s(%s)",
                       kernel_lib_name.c_str(), op_desc->GetName().c_str(), op_desc->GetType().c_str());
    GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "[Get][OpsKernelInfoStore] for op:%s failed", node->GetName().c_str());
    return ge::GE_GRAPH_PARAM_NULLPTR;
  }
  // Per-kernel-lib failure reasons, collected only for error reporting.
  std::map<std::string, std::string> unsupported_reasons;
  std::string unsupported_reason;
  // begin accuracy supported check
  if (!CheckAccuracySupport(kernel_info, instance, node, unsupported_reason)) {
    // if check accuracy support failed , try to go to other engine.
    GELOGD("Check Accuracy Supported return not support, node name is %s. Try to go to other engine.",
           op_desc->GetName().c_str());
    string kernel_name_origin = kernel_lib_name;
    OpsKernelManager &ops_kernel_manager = instance->OpsKernelManagerObj();
    auto kernel_map = ops_kernel_manager.GetAllOpsKernelInfoStores();
    // Try every other registered kernel lib; the first one that passes the
    // accuracy check wins.
    for (auto it = kernel_map.begin(); it != kernel_map.end(); ++it) {
      string tmp_kernel_name = it->first;
      if (tmp_kernel_name == kernel_name_origin) {
        continue;
      }
      OpsKernelInfoStorePtr tmp_kernel_info = it->second;
      if (CheckAccuracySupport(tmp_kernel_info, instance, node, unsupported_reason)) {
        kernel_lib_name = tmp_kernel_name;
        GELOGD("Find kernel lib %s support node:%s, type:%s , get kernel lib success.", tmp_kernel_name.c_str(),
               node->GetName().c_str(), op_desc->GetType().c_str());
        return GRAPH_SUCCESS;
      } else {
        unsupported_reasons.emplace(tmp_kernel_name, unsupported_reason);
      }
    }
    // No kernel lib supports the node: report every collected reason, then fail.
    for (const auto &it : unsupported_reasons) {
      REPORT_INPUT_ERROR("E13002", std::vector<std::string>({"optype", "opskernel", "reason"}),
                         std::vector<std::string>({op_desc->GetType(), it.first, it.second}));
      GELOGE(GE_GRAPH_ASSIGN_ENGINE_FAILED,
             "[Call][CheckAccuracySupport] for Op type %s of ops kernel %s is unsupported, reason:%s",
             op_desc->GetType().c_str(), it.first.c_str(), it.second.c_str());
    }
    REPORT_INPUT_ERROR("E13003", std::vector<std::string>({"opname", "optype"}),
                       std::vector<std::string>({op_desc->GetName(), op_desc->GetType()}));
    GELOGE(GRAPH_FAILED, "[Check][Param] Cannot find kernel lib support node:%s, type:%s , get kernel lib failed.",
           node->GetName().c_str(), op_desc->GetType().c_str());
    return GRAPH_FAILED;
  }
  return GRAPH_SUCCESS;
}
  151. bool CompileNodesPass::CheckAccuracySupport(
  152. const OpsKernelInfoStorePtr &kernel_info, const std::shared_ptr<GELib> instance,
  153. const NodePtr &node, string& unsupported_reason) {
  154. if (!(kernel_info->CheckAccuracySupported(node, unsupported_reason, true))) {
  155. return false;
  156. }
  157. return true;
  158. }
  159. graphStatus CompileNodesPass::CompileNodes(const std::shared_ptr<GELib> instance,
  160. std::unordered_map<string, vector<NodePtr>> &kernel_to_compile_nodes) {
  161. // compile nodes, if kernel is aicpu, check support and set engine info.
  162. OpsKernelInfoStorePtr kernel_info;
  163. for (auto &kernel_nodes : kernel_to_compile_nodes) {
  164. kernel_info = instance->OpsKernelManagerObj().GetOpsKernelInfoStore(kernel_nodes.first);
  165. if (kernel_info == nullptr) {
  166. REPORT_INNER_ERROR("E19999", "Find ops kernel by name:%s failed", kernel_nodes.first.c_str());
  167. GELOGE(ge::GE_GRAPH_PARAM_NULLPTR, "[Get][OpsKernelInfoStore] for op %s failed", kernel_nodes.first.c_str());
  168. return ge::GE_GRAPH_PARAM_NULLPTR;
  169. }
  170. string reason;
  171. if (kernel_nodes.first == kAICPUKernelLibName) {
  172. for (auto node : kernel_nodes.second) {
  173. // this node will go to aicpu engine ,no need compile
  174. node->GetOpDesc()->SetOpEngineName(kAICPUEngineName);
  175. node->GetOpDesc()->SetOpKernelLibName(kAICPUKernelLibName);
  176. AttrUtils::SetInt(node->GetOpDesc(), ATTR_NAME_IMPLY_TYPE, static_cast<int64_t>(ImplyType::AI_CPU));
  177. }
  178. continue;
  179. }
  180. auto ret = kernel_info->CompileOp(kernel_nodes.second);
  181. if (ret != GRAPH_SUCCESS) {
  182. REPORT_CALL_ERROR("E19999", "Call CompileOp failed, kernel_lib_name:%s, ret:%d",
  183. kernel_nodes.first.c_str(), ret);
  184. GELOGE(ret, "[Compile][Op] failed, kernel name is %s", kernel_nodes.first.c_str());
  185. return GRAPH_FAILED;
  186. }
  187. }
  188. return GRAPH_SUCCESS;
  189. }
  190. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示