
graph_optimize.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/optimize/graph_optimize.h"

#include "graph/ge_context.h"
#include "graph/common/local_context.h"
#include "graph/passes/dimension_adjust_pass.h"
#include "inc/pass_manager.h"
#include "init/gelib.h"

namespace {
const char *const kVectorCore = "VectorCore";
const char *const kVectorEngine = "VectorEngine";
const char *const kAicoreEngine = "AIcoreEngine";
}  // namespace

namespace ge {
GraphOptimize::GraphOptimize()
    : optimize_type_(domi::FrameworkType::TENSORFLOW),
      cal_config_(""),
      insert_op_config_(""),
      core_type_("") {}

// Records, for every node, the names and indices of its source nodes (control
// and data inputs) as properties on the node's OpDesc.
void AddNodeInputProperty(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[AddNodeInputProperty]: compute_graph is nullptr.");
    return;
  }
  for (ge::NodePtr &node : compute_graph->GetDirectNode()) {
    auto node_op_desc = node->GetOpDesc();
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return );
    auto in_control_anchor = node->GetInControlAnchor();
    vector<string> src_name_list;
    vector<string> input_name_list;
    vector<int64_t> src_index_list;
    GE_IF_BOOL_EXEC(
        in_control_anchor != nullptr, string src_name_temp;
        for (auto &out_control_anchor : in_control_anchor->GetPeerOutControlAnchors()) {
          ge::NodePtr src_node = out_control_anchor->GetOwnerNode();
          GE_IF_BOOL_EXEC(src_node == nullptr, GELOGW("src_node is nullptr!"); continue);
          src_name_temp = src_name_temp == "" ? src_node->GetName() : src_name_temp + ":" + src_node->GetName();
        } GE_IF_BOOL_EXEC(src_name_temp != "", src_name_list.emplace_back(src_name_temp);
                          node_op_desc->SetSrcName(src_name_list);))
    for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
      auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(
          peer_out_anchor == nullptr,
          GELOGW("peer_out_anchor is nullptr! node: %s", node->GetName().c_str()); continue);
      ge::NodePtr src_node = peer_out_anchor->GetOwnerNode();
      src_index_list = node_op_desc->GetSrcIndex();
      src_name_list.emplace_back(src_node->GetName());
      src_index_list.emplace_back(peer_out_anchor->GetIdx());
      node_op_desc->SetSrcName(src_name_list);
      node_op_desc->SetSrcIndex(src_index_list);
      GE_IF_BOOL_EXEC(!(node_op_desc->GetType() == NETOUTPUT && GetLocalOmgContext().type == domi::TENSORFLOW),
                      ge::NodePtr peer_owner_node = peer_out_anchor->GetOwnerNode();
                      input_name_list.emplace_back(
                          peer_owner_node->GetName() +
                          (peer_out_anchor->GetIdx() == 0 ? "" : ": " + to_string(peer_out_anchor->GetIdx())));
                      node_op_desc->SetInputName(input_name_list);)
    }
  }
}

// Runs the graph optimizers registered for the given engine on a partitioned
// subgraph. In tuning build mode the post-graph-slice hook is used instead.
Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const ComputeGraphPtr &parent_graph,
                                       const std::string &engine_name) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeSubGraph]: compute_graph is nullptr.");
    return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  }
  Status ret = SUCCESS;
  vector<GraphOptimizerPtr> graph_optimizer;
  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "GraphOptimzer: GE is not initialized");
    return GE_CLI_GE_NOT_INITIALIZED;
  }
  if (instance_ptr->DNNEngineManagerObj().IsEngineRegistered(engine_name)) {
    instance_ptr->OpsKernelManagerObj().GetGraphOptimizerByEngine(engine_name, graph_optimizer);
    AddNodeInputProperty(compute_graph);
    if (compute_graph->GetDirectNode().size() == 0) {
      GELOGW("[OptimizeSubGraph] compute_graph do not has any node.");
      return SUCCESS;
    }
    if (build_mode_ == BUILD_MODE_TUNING && (build_step_ == BUILD_STEP_AFTER_UB_MATCH ||
                                             build_step_ == BUILD_STEP_AFTER_MERGE)) {
      for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
        Status ret = (*iter)->OptimizeFusedGraphAfterGraphSlice(*(compute_graph));
        if (ret != SUCCESS) {
          auto root_graph = ge::GraphUtils::FindRootGraph(parent_graph);
          if (root_graph != nullptr) {
            ErrorManager::GetInstance().SaveMstuneCompileFailedMsg(root_graph->GetName());
          }
          GELOGE(ret, "[OptimizeSubGraph][OptimizeFusedGraphAfterGraphSlice]: graph optimize failed, ret:%d", ret);
          return ret;
        }
      }
      return SUCCESS;
    }
    for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
      ret = (*iter)->OptimizeFusedGraph(*(compute_graph));
      if (ret != SUCCESS) {
        GELOGE(ret, "[OptimizeSubGraph][OptimizeFusedGraph]: graph optimize failed, ret:%d", ret);
        return ret;
      }
    }
  } else {
    GELOGI("Engine: %s is not registered. do nothing in subGraph Optimize by ATC.", engine_name.c_str());
  }
  return ret;
}

// Runs all registered graph optimizers (by priority) on the original graph,
// skipping the engine excluded by core_type_.
Status GraphOptimize::OptimizeOriginalGraph(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeOriginalGraph]: compute_graph is nullptr.");
    return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  }
  Status ret = SUCCESS;
  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
    return GE_CLI_GE_NOT_INITIALIZED;
  }
  auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %lu.",
         graph_optimizer.size());
  string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  GELOGD("[OptimizeOriginalGraph]: engine type will exclude: %s", exclude_core_Type.c_str());
  if (graph_optimizer.size() != 0) {
    for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
      if (iter->first == exclude_core_Type) {
        continue;
      }
      ret = (iter->second)->OptimizeOriginalGraph(*compute_graph);
      if (ret != SUCCESS) {
        GELOGE(ret, "[OptimizeOriginalGraph]: graph optimize failed, ret:%d", ret);
        return ret;
      }
    }
  }
  return ret;
}

// Asks each registered optimizer to refine running formats and judge op
// insertion on the original graph. Skipped when the graph executes on host.
Status GraphOptimize::OptimizeOriginalGraphJudgeInsert(ComputeGraphPtr &compute_graph) {
  GELOGD("OptimizeOriginalGraphJudgeInsert in");
  if (GetContext().GetHostExecFlag()) {
    // graph exec on host, no need OptimizeOriginalGraph
    return SUCCESS;
  }
  GE_CHECK_NOTNULL(compute_graph);
  Status ret = SUCCESS;
  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
    return GE_CLI_GE_NOT_INITIALIZED;
  }
  auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %lu.",
         graph_optimizer.size());
  string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  if (graph_optimizer.size() != 0) {
    for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
      if (iter->first == exclude_core_Type) {
        GELOGI("[OptimizeOriginalGraphJudgeInsert]: engine type will exclude: %s", exclude_core_Type.c_str());
        continue;
      }
      GELOGI("Begin to refine running format by engine %s", iter->first.c_str());
      ret = (iter->second)->OptimizeOriginalGraphJudgeInsert(*compute_graph);
      if (ret != SUCCESS) {
        GELOGE(ret, "[OptimizeOriginalGraphJudgeInsert]: graph optimize failed, ret:%d", ret);
        return ret;
      }
    }
  }
  return ret;
}

// Quantization prepare phase: gives every registered optimizer a chance to
// pre-process the original graph via OptimizeGraphPrepare().
Status GraphOptimize::OptimizeOriginalGraphForQuantize(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeOriginalGraph]: compute_graph is nullptr.");
    return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  }
  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeOriginalGraph failed.");
    return GE_CLI_GE_NOT_INITIALIZED;
  }
  auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  GELOGI("optimize by opskernel in original graph optimize quantize phase. num of graph_optimizer is %zu.",
         graph_optimizer.size());
  Status ret = SUCCESS;
  string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  GELOGD("[OptimizeOriginalGraphForQuantize]: engine type will exclude: %s", exclude_core_Type.c_str());
  if (graph_optimizer.size() != 0) {
    for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
      if (iter->first == exclude_core_Type || iter->second == nullptr) {
        continue;
      }
      ret = iter->second->OptimizeGraphPrepare(*compute_graph);
      if (ret != SUCCESS) {
        GELOGE(ret, "[OptimizeOriginalGraphForQuantize]: graph optimize failed, ret:%u", ret);
        return ret;
      }
    }
  }
  return ret;
}

// Pre-build phase for the runtime: lets every registered optimizer run its
// OptimizeGraphBeforeBuild() hook.
Status GraphOptimize::OptimizeGraphBeforeBuildForRts(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[OptimizeGraphBeforeBuildForRts]: compute_graph is nullptr.");
    return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  }
  std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
    GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeGraphBeforeBuildForRts failed.");
    return GE_CLI_GE_NOT_INITIALIZED;
  }
  auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  GELOGI("optimize by opskernel in graph optimize before build phase. num of graph_optimizer is %zu.",
         graph_optimizer.size());
  Status ret = SUCCESS;
  string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  GELOGI("[OptimizeGraphBeforeBuildForRts]: engine type will exclude: %s, core_type_: %s",
         exclude_core_Type.c_str(), core_type_.c_str());
  if (graph_optimizer.size() != 0) {
    for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
      if (iter->first == exclude_core_Type || iter->second == nullptr) {
        continue;
      }
      ret = iter->second->OptimizeGraphBeforeBuild(*compute_graph);
      if (ret != SUCCESS) {
        GELOGE(ret, "[OptimizeGraphBeforeBuildForRts]: graph optimize failed, ret:%u", ret);
        return ret;
      }
    }
  }
  return ret;
}

// Copies the graph manager options this class depends on into member fields.
Status GraphOptimize::SetOptions(const ge::GraphManagerOptions &options) {
  if (options.framework_type >= static_cast<int32_t>(domi::FrameworkType::FRAMEWORK_RESERVED)) {
    GELOGE(GE_GRAPH_OPTIONS_INVALID, "Optimize Type %d invalid.", options.framework_type);
    return GE_GRAPH_OPTIONS_INVALID;
  }
  optimize_type_ = static_cast<domi::FrameworkType>(options.framework_type);
  cal_config_ = options.calibration_conf_file;
  insert_op_config_ = options.insert_op_file;
  train_graph_flag_ = options.train_graph_flag;
  local_fmk_op_flag_ = options.local_fmk_op_flag;
  func_bin_path_ = options.func_bin_path;
  core_type_ = options.core_type;
  build_mode_ = options.build_mode;
  build_step_ = options.build_step;
  return SUCCESS;
}

// Rewrites a fixed white-list of TensorFlow dataset/iterator ops into
// FrameworkOp, preserving the original type and framework type as attributes.
void GraphOptimize::TranFrameOp(ComputeGraphPtr &compute_graph) {
  GE_CHECK_NOTNULL_JUST_RETURN(compute_graph);
  vector<string> local_framework_op_vec = {
      "TensorDataset", "QueueDataset", "DeviceQueueDataset", "ParallelMapDataset", "BatchDatasetV2",
      "IteratorV2", "MakeIterator", "IteratorGetNext", "FilterDataset", "MapAndBatchDatasetV2"};
  for (auto &nodePtr : compute_graph->GetAllNodes()) {
    OpDescPtr op = nodePtr->GetOpDesc();
    GE_IF_BOOL_EXEC(op == nullptr, GELOGW("op is nullptr!"); continue);
    // fwkop black-white sheet
    vector<string>::iterator iter =
        std::find(local_framework_op_vec.begin(), local_framework_op_vec.end(), op->GetType());
    if (iter != local_framework_op_vec.end()) {
      // set - original_type
      if (!AttrUtils::SetStr(op, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, op->GetType())) {
        GELOGW("TranFrameOp SetStr ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE failed");
      }
      // set - framework_type
      // [No need to verify return value]
      op->SetType("FrameworkOp");
      if (!AttrUtils::SetInt(op, ATTR_NAME_FRAMEWORK_FWK_TYPE, domi::FrameworkType::TENSORFLOW)) {
        GELOGW("TranFrameOp SetInt ATTR_NAME_FRAMEWORK_FWK_TYPE failed");
      }
    }
  }
}

// Marks ops whose input name matches one of their output names as reference
// ops: the matching input/output descs get ref ports and the node gets the
// ATTR_NAME_REFERENCE attribute.
Status GraphOptimize::IdentifyReference(ComputeGraphPtr &compute_graph) {
  for (auto &node : compute_graph->GetAllNodes()) {
    GE_CHECK_NOTNULL(node);
    auto op_desc = node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    auto input_name_index = op_desc->GetAllInputName();
    bool is_ref = false;
    for (const auto &name_index : input_name_index) {
      const int out_index = op_desc->GetOutputIndexByName(name_index.first);
      if (out_index != -1) {
        auto input_desc = op_desc->GetInputDesc(name_index.second);
        input_desc.SetRefPortByIndex({name_index.second});
        op_desc->UpdateInputDesc(name_index.second, input_desc);
        GELOGI("SetRefPort: set op[%s] input desc[%u-%s] ref.",
               op_desc->GetName().c_str(), name_index.second, name_index.first.c_str());
        auto output_desc = op_desc->GetOutputDesc(static_cast<uint32_t>(out_index));
        output_desc.SetRefPortByIndex({name_index.second});
        op_desc->UpdateOutputDesc(static_cast<uint32_t>(out_index), output_desc);
        GELOGI("SetRefPort: set op[%s] output desc[%u-%s] ref.",
               op_desc->GetName().c_str(), out_index, name_index.first.c_str());
        is_ref = true;
      }
    }
    if (is_ref) {
      AttrUtils::SetBool(op_desc, ATTR_NAME_REFERENCE, is_ref);
      GELOGI("param [node] %s is reference node, set attribute %s to be true.",
             node->GetName().c_str(), ATTR_NAME_REFERENCE.c_str());
    }
  }
  return SUCCESS;
}
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE is composed mainly of two parts: GE API and GE Core.
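For orientation, the following is a minimal sketch of how a caller inside GE might drive the entry points defined in graph_optimize.cc above. The wrapper function name (RunOptimizePhases), the chosen option values, and the exact phase ordering are illustrative assumptions based on how these hooks are typically used around graph preparation, partitioning, and build; only the GraphOptimize methods and the option fields read by SetOptions() come from the file itself.

#include "graph/optimize/graph_optimize.h"

// Hypothetical driver; name and phase ordering are assumptions, not part of graph_optimize.cc.
ge::Status RunOptimizePhases(ge::ComputeGraphPtr &compute_graph) {
  ge::GraphOptimize graph_optimize;

  // Only the fields read by GraphOptimize::SetOptions() are filled in here.
  ge::GraphManagerOptions options;
  options.framework_type = static_cast<int32_t>(domi::FrameworkType::TENSORFLOW);
  options.core_type = "AIcoreEngine";  // anything except "VectorCore" keeps the AI Core optimizers enabled
  ge::Status ret = graph_optimize.SetOptions(options);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // Original-graph phases: quantize prepare, general optimize, format/insert judgement.
  ret = graph_optimize.OptimizeOriginalGraphForQuantize(compute_graph);
  if (ret != ge::SUCCESS) {
    return ret;
  }
  ret = graph_optimize.OptimizeOriginalGraph(compute_graph);
  if (ret != ge::SUCCESS) {
    return ret;
  }
  ret = graph_optimize.OptimizeOriginalGraphJudgeInsert(compute_graph);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // After partitioning, each subgraph would be optimized per engine, e.g.
  //   graph_optimize.OptimizeSubGraph(subgraph, compute_graph, "AIcoreEngine");
  // before the final pre-build hook below.
  return graph_optimize.OptimizeGraphBeforeBuildForRts(compute_graph);
}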