You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

graph_optimize.cc 22 kB

5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
3 years ago
5 years ago
4 years ago
4 years ago
5 years ago
3 years ago
5 years ago
4 years ago
4 years ago
5 years ago
3 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
3 years ago
5 years ago
4 years ago
5 years ago
3 years ago
5 years ago
3 years ago
4 years ago
5 years ago
3 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
3 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/optimize/graph_optimize.h"
  17. #include "graph/ge_context.h"
  18. #include "graph/passes/dimension_adjust_pass.h"
  19. #include "inc/pass_manager.h"
  20. #include "init/gelib.h"
  21. #include "graph/partition/engine_place.h"
// File-local engine/core-type identifiers used when selecting or excluding
// graph optimizers by engine name in the optimize phases below.
namespace {
const char *const kVectorCore = "VectorCore";          // core_type_ value meaning "run on vector core"
const char *const kVectorEngine = "VectorEngine";      // engine excluded when core type is AI core
const char *const kAicoreEngine = "AIcoreEngine";      // engine excluded when core type is vector core
const char *const kHostCpuEngine = "DNN_VM_HOST_CPU";  // the only engine consulted in host-exec mode
}  // namespace
  28. namespace ge {
  29. GraphOptimize::GraphOptimize()
  30. : optimize_type_(domi::FrameworkType::TENSORFLOW),
  31. cal_config_(""),
  32. insert_op_config_(""),
  33. core_type_("") {}
// Populate the legacy "src name / src index / input name" string properties on
// every direct node's OpDesc, derived from the node's incoming control and
// data edges. Downstream (framework-format) consumers read these lists.
// Logs and returns without touching the graph when compute_graph is null.
void AddNodeInputProperty(ComputeGraphPtr &compute_graph) {
  if (compute_graph == nullptr) {
    REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
    GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
    return;
  }
  for (ge::NodePtr &node : compute_graph->GetDirectNode()) {
    auto node_op_desc = node->GetOpDesc();
    // NOTE(review): `return` (not `continue`) aborts the whole traversal on the
    // first node lacking an OpDesc — presumably intentional; confirm.
    GE_IF_BOOL_EXEC(node_op_desc == nullptr, GELOGW("node_op_desc is nullptr!"); return );
    auto in_control_anchor = node->GetInControlAnchor();
    vector<string> src_name_list;
    vector<string> input_name_list;
    vector<int64_t> src_index_list;
    // Control edges: join all control-source node names with ':' into a single
    // entry of the src-name list; only written when at least one source exists.
    GE_IF_BOOL_EXEC(
        in_control_anchor != nullptr, string src_name_temp; for (auto &out_control_anchor
                                                                 : in_control_anchor->GetPeerOutControlAnchors()) {
          ge::NodePtr src_node = out_control_anchor->GetOwnerNode();
          GE_IF_BOOL_EXEC(src_node == nullptr, GELOGW("src_node is nullptr!"); continue);
          src_name_temp = src_name_temp == "" ? src_node->GetName() : src_name_temp + ":" + src_node->GetName();
        } GE_IF_BOOL_EXEC(src_name_temp != "", src_name_list.emplace_back(src_name_temp);
                          node_op_desc->SetSrcName(src_name_list);))
    // Data edges: append each peer output's owner name and output index. The
    // lists are re-written to the OpDesc after every edge, so the stored state
    // stays consistent even if iteration stops early.
    for (auto &in_data_anchor : node->GetAllInDataAnchors()) {
      auto peer_out_anchor = in_data_anchor->GetPeerOutAnchor();
      GE_IF_BOOL_EXEC(peer_out_anchor == nullptr, continue);
      ge::NodePtr src_node = peer_out_anchor->GetOwnerNode();
      src_index_list = node_op_desc->GetSrcIndex();
      src_name_list.emplace_back(src_node->GetName());
      src_index_list.emplace_back(peer_out_anchor->GetIdx());
      node_op_desc->SetSrcName(src_name_list);
      node_op_desc->SetSrcIndex(src_index_list);
      // Input names ("owner" or "owner: idx") are recorded for every node
      // except a TensorFlow NETOUTPUT node.
      GE_IF_BOOL_EXEC(!(node_op_desc->GetType() == NETOUTPUT && GetLocalOmgContext().type == domi::TENSORFLOW),
                      ge::NodePtr peer_owner_node = peer_out_anchor->GetOwnerNode();
                      input_name_list.emplace_back(
                          peer_owner_node->GetName() +
                          (peer_out_anchor->GetIdx() == 0 ? "" : ": " + to_string(peer_out_anchor->GetIdx())));
                      node_op_desc->SetInputName(input_name_list);)
    }
  }
}
  73. Status GraphOptimize::OptimizeSubGraph(ComputeGraphPtr &compute_graph, const std::string &engine_name) {
  74. if (compute_graph == nullptr) {
  75. REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
  76. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
  77. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  78. }
  79. vector<GraphOptimizerPtr> graph_optimizer;
  80. if (DNNEngineManager::GetInstance().IsEngineRegistered(engine_name)) {
  81. OpsKernelManager::GetInstance().GetGraphOptimizerByEngine(engine_name, graph_optimizer);
  82. AddNodeInputProperty(compute_graph);
  83. if (compute_graph->GetDirectNode().size() == 0) {
  84. GELOGW("[OptimizeSubGraph] compute_graph do not has any node.");
  85. return SUCCESS;
  86. }
  87. if (build_mode_ == BUILD_MODE_TUNING && (build_step_ == BUILD_STEP_AFTER_UB_MATCH
  88. || build_step_ == BUILD_STEP_AFTER_MERGE)) {
  89. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  90. Status ret = (*iter)->OptimizeFusedGraphAfterGraphSlice(*(compute_graph));
  91. if (ret != SUCCESS) {
  92. REPORT_INNER_ERROR("E19999", "Call OptimizeFusedGraphAfterGraphSlice failed, ret:%d, engine_name:%s, "
  93. "graph_name:%s", ret, engine_name.c_str(),
  94. compute_graph->GetName().c_str());
  95. GELOGE(ret, "[Call][OptimizeFusedGraphAfterGraphSlice] failed, ret:%d, engine_name:%s, graph_name:%s",
  96. ret, engine_name.c_str(), compute_graph->GetName().c_str());
  97. return ret;
  98. }
  99. }
  100. return SUCCESS;
  101. }
  102. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  103. Status ret = (*iter)->OptimizeFusedGraph(*(compute_graph));
  104. if (ret != SUCCESS) {
  105. REPORT_INNER_ERROR("E19999", "Call OptimizeFusedGraph failed, ret:%d, engine_name:%s, "
  106. "graph_name:%s", ret, engine_name.c_str(),
  107. compute_graph->GetName().c_str());
  108. GELOGE(ret, "[Optimize][FusedGraph] failed, ret:%d, engine_name:%s, graph_name:%s",
  109. ret, engine_name.c_str(), compute_graph->GetName().c_str());
  110. return ret;
  111. }
  112. }
  113. } else {
  114. GELOGI("Engine: %s is not registered. do nothing in subGraph Optimize by ATC.", engine_name.c_str());
  115. }
  116. return SUCCESS;
  117. }
  118. Status GraphOptimize::OptimizeOriginalGraph(ComputeGraphPtr &compute_graph) {
  119. if (compute_graph == nullptr) {
  120. REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
  121. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
  122. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  123. }
  124. Status ret = SUCCESS;
  125. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  126. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  127. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid, graph:%s.",
  128. compute_graph->GetName().c_str());
  129. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][GELib] Gelib not init before, graph:%s.",
  130. compute_graph->GetName().c_str());
  131. return GE_CLI_GE_NOT_INITIALIZED;
  132. }
  133. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  134. GELOGI("optimize by opskernel in original graph optimize phase. num of graph_optimizer is %zu.",
  135. graph_optimizer.size());
  136. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  137. GELOGD("[OptimizeOriginalGraph]: engine type will exclude: %s", exclude_core_Type.c_str());
  138. if (graph_optimizer.size() != 0) {
  139. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  140. if (iter->first == exclude_core_Type) {
  141. continue;
  142. }
  143. if (GetContext().GetHostExecFlag() && iter->first != kHostCpuEngine) {
  144. // graph exec on host, no need OptimizeOriginalGraph for other engine.
  145. continue;
  146. }
  147. ret = (iter->second)->OptimizeOriginalGraph(*compute_graph);
  148. if (ret != SUCCESS) {
  149. REPORT_INNER_ERROR("E19999", "Call OptimizeOriginalGraph failed, ret:%d, engine_name:%s, "
  150. "graph_name:%s", ret, iter->first.c_str(),
  151. compute_graph->GetName().c_str());
  152. GELOGE(ret, "[Optimize][OriginalGraph] failed, ret:%d, engine_name:%s, graph_name:%s",
  153. ret, iter->first.c_str(), compute_graph->GetName().c_str());
  154. return ret;
  155. }
  156. }
  157. }
  158. return ret;
  159. }
  160. Status GraphOptimize::OptimizeOriginalGraphJudgeInsert(ComputeGraphPtr &compute_graph) {
  161. GELOGD("OptimizeOriginalGraphJudgeInsert in");
  162. GE_CHECK_NOTNULL(compute_graph);
  163. Status ret = SUCCESS;
  164. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  165. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  166. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid, graph:%s",
  167. compute_graph->GetName().c_str());
  168. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][GELib] Gelib not init before, graph:%s",
  169. compute_graph->GetName().c_str());
  170. return GE_CLI_GE_NOT_INITIALIZED;
  171. }
  172. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  173. GELOGI("optimize by opskernel in judging insert phase. num of graph_optimizer is %zu.",
  174. graph_optimizer.size());
  175. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  176. if (graph_optimizer.size() != 0) {
  177. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  178. if (iter->first == exclude_core_Type) {
  179. GELOGI("[OptimizeOriginalGraphJudgeInsert]: engine type will exclude: %s", exclude_core_Type.c_str());
  180. continue;
  181. }
  182. if (GetContext().GetHostExecFlag() && iter->first != kHostCpuEngine) {
  183. // graph exec on host, no need OptimizeOriginalGraphJudgeInsert for other engine.
  184. continue;
  185. }
  186. GELOGI("Begin to refine running format by engine %s", iter->first.c_str());
  187. ret = (iter->second)->OptimizeOriginalGraphJudgeInsert(*compute_graph);
  188. if (ret != SUCCESS) {
  189. REPORT_INNER_ERROR("E19999", "Call OptimizeOriginalGraphJudgeInsert failed, ret:%d, engine_name:%s, "
  190. "graph_name:%s", ret, iter->first.c_str(),
  191. compute_graph->GetName().c_str());
  192. GELOGE(ret, "[Call][OptimizeOriginalGraphJudgeInsert] failed, ret:%d, engine_name:%s, graph_name:%s",
  193. ret, iter->first.c_str(), compute_graph->GetName().c_str());
  194. return ret;
  195. }
  196. }
  197. }
  198. return ret;
  199. }
  200. Status GraphOptimize::OptimizeOriginalGraphForQuantize(ComputeGraphPtr &compute_graph) {
  201. if (compute_graph == nullptr) {
  202. REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
  203. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
  204. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  205. }
  206. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  207. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  208. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid, graph:%s.",
  209. compute_graph->GetName().c_str());
  210. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][Gelib] Gelib not init before, graph:%s.",
  211. compute_graph->GetName().c_str());
  212. return GE_CLI_GE_NOT_INITIALIZED;
  213. }
  214. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  215. GELOGI("optimize by opskernel in original graph optimize quantize phase. num of graph_optimizer is %zu.",
  216. graph_optimizer.size());
  217. Status ret = SUCCESS;
  218. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  219. GELOGD("[OptimizeOriginalGraphForQuantize]: engine type will exclude: %s", exclude_core_Type.c_str());
  220. if (graph_optimizer.size() != 0) {
  221. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  222. if (iter->first == exclude_core_Type || iter->second == nullptr) {
  223. continue;
  224. }
  225. ret = iter->second->OptimizeGraphPrepare(*compute_graph);
  226. if (ret != SUCCESS) {
  227. REPORT_INNER_ERROR("E19999", "Call OptimizeGraphPrepare failed, ret:%d, engine_name:%s, "
  228. "graph_name:%s", ret, iter->first.c_str(),
  229. compute_graph->GetName().c_str());
  230. GELOGE(ret, "[Call][OptimizeGraphPrepare] failed, ret:%d, engine_name:%s, graph_name:%s",
  231. ret, iter->first.c_str(), compute_graph->GetName().c_str());
  232. return ret;
  233. }
  234. }
  235. }
  236. return ret;
  237. }
  238. Status GraphOptimize::OptimizeGraphBeforeBuild(ComputeGraphPtr &compute_graph) {
  239. if (compute_graph == nullptr) {
  240. REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
  241. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
  242. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  243. }
  244. EnginePlacer engine_place(compute_graph);
  245. Status ret = engine_place.Run();
  246. if (ret != SUCCESS) {
  247. REPORT_CALL_ERROR("E19999", "Assign atomic engine for graph %s failed", compute_graph->GetName().c_str());
  248. GELOGE(ret, "[Assign][Engine] Assign atomic engine for graph %s failed", compute_graph->GetName().c_str());
  249. return ret;
  250. }
  251. ret = engine_place.AssignCompositeEngine();
  252. if (ret != SUCCESS) {
  253. REPORT_CALL_ERROR("E19999", "Assign composite engine for graph %s failed", compute_graph->GetName().c_str());
  254. GELOGE(ret, "[Assign][Engine] Assign composite engine for graph %s failed", compute_graph->GetName().c_str());
  255. return ret;
  256. }
  257. auto graph_optimizer = OpsKernelManager::GetInstance().GetAllGraphOptimizerObjsByPriority();
  258. GELOGD("optimize by opskernel in graph optimize before build phase. num of graph_optimizer is %zu.",
  259. graph_optimizer.size());
  260. string exclude_core_Type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  261. GELOGD("[OptimizeGraphBeforeBuild]: engine type will exclude: %s, core_type_: %s",
  262. exclude_core_Type.c_str(), core_type_.c_str());
  263. if (graph_optimizer.size() != 0) {
  264. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  265. if (iter->first == exclude_core_Type || iter->second == nullptr) {
  266. continue;
  267. }
  268. ret = iter->second->OptimizeGraphBeforeBuild(*compute_graph);
  269. if (ret != SUCCESS) {
  270. REPORT_INNER_ERROR("E19999", "Call OptimizeGraphBeforeBuild failed, ret:%d, engine_name:%s, "
  271. "graph_name:%s", ret, iter->first.c_str(),
  272. compute_graph->GetName().c_str());
  273. GELOGE(ret, "[Call][OptimizeGraphBeforeBuild] failed, ret:%d, engine_name:%s, graph_name:%s",
  274. ret, iter->first.c_str(), compute_graph->GetName().c_str());
  275. return ret;
  276. }
  277. }
  278. }
  279. return SUCCESS;
  280. }
  281. Status GraphOptimize::OptimizeAfterStage1(ComputeGraphPtr &compute_graph) {
  282. GE_CHECK_NOTNULL(compute_graph);
  283. GELOGD("OptimizeAfterStage1 in");
  284. if (GetContext().GetHostExecFlag()) {
  285. // graph exec on host, no need OptimizeAfterStage1
  286. return SUCCESS;
  287. }
  288. Status ret = SUCCESS;
  289. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  290. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  291. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid");
  292. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "OptimizeAfterStage1 failed.");
  293. return GE_CLI_GE_NOT_INITIALIZED;
  294. }
  295. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  296. GELOGI("Optimize by ops kernel in after stage1 phase, num of graph_optimizer is %zu.", graph_optimizer.size());
  297. string exclude_core_type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  298. if (graph_optimizer.size() != 0) {
  299. for (auto iter = graph_optimizer.begin(); iter != graph_optimizer.end(); ++iter) {
  300. if (iter->first == exclude_core_type) {
  301. GELOGI("[OptimizeAfterStage1]: engine type will exclude:%s.", exclude_core_type.c_str());
  302. continue;
  303. }
  304. GELOGI("Begin to optimize graph after stage1 by engine %s.", iter->first.c_str());
  305. ret = (iter->second)->OptimizeAfterStage1(*compute_graph);
  306. if (ret != SUCCESS) {
  307. REPORT_INNER_ERROR("E19999", "Call OptimizeAfterStage1 failed, ret:%d, engine_name:%s, "
  308. "graph_name:%s.", ret, iter->first.c_str(), compute_graph->GetName().c_str());
  309. GELOGE(ret, "[OptimizeAfterStage1]: graph optimize failed, ret:%d.", ret);
  310. return ret;
  311. }
  312. }
  313. }
  314. return ret;
  315. }
  316. Status GraphOptimize::SetOptions(const ge::GraphManagerOptions &options) {
  317. if (options.framework_type >= static_cast<int32_t>(domi::FrameworkType::FRAMEWORK_RESERVED)) {
  318. REPORT_INNER_ERROR("E19999", "Param framework_type:%d in option check invalid",
  319. options.framework_type);
  320. GELOGE(GE_GRAPH_OPTIONS_INVALID, "Optimize Type %d invalid.", options.framework_type);
  321. return GE_GRAPH_OPTIONS_INVALID;
  322. }
  323. optimize_type_ = static_cast<domi::FrameworkType>(options.framework_type);
  324. cal_config_ = options.calibration_conf_file;
  325. insert_op_config_ = options.insert_op_file;
  326. train_graph_flag_ = options.train_graph_flag;
  327. local_fmk_op_flag_ = options.local_fmk_op_flag;
  328. func_bin_path_ = options.func_bin_path;
  329. core_type_ = options.core_type;
  330. build_mode_ = options.build_mode;
  331. build_step_ = options.build_step;
  332. return SUCCESS;
  333. }
  334. void GraphOptimize::TranFrameOp(ComputeGraphPtr &compute_graph) {
  335. GE_CHECK_NOTNULL_JUST_RETURN(compute_graph);
  336. vector<string> local_framework_op_vec = {
  337. "TensorDataset", "QueueDataset", "DeviceQueueDataset", "ParallelMapDataset", "BatchDatasetV2",
  338. "IteratorV2", "MakeIterator", "IteratorGetNext", "FilterDataset", "MapAndBatchDatasetV2"};
  339. for (auto &nodePtr : compute_graph->GetAllNodes()) {
  340. OpDescPtr op = nodePtr->GetOpDesc();
  341. GE_IF_BOOL_EXEC(op == nullptr, GELOGW("op is nullptr!"); continue);
  342. // fwkop black-white sheet
  343. vector<string>::iterator iter =
  344. std::find(local_framework_op_vec.begin(), local_framework_op_vec.end(), op->GetType());
  345. if (iter != local_framework_op_vec.end()) {
  346. // set - original_type
  347. if (!AttrUtils::SetStr(op, ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE, op->GetType())) {
  348. GELOGW("TranFrameOp SetStr ATTR_NAME_FRAMEWORK_ORIGINAL_TYPE failed");
  349. }
  350. // set - framework_type
  351. // [No need to verify return value]
  352. op->SetType("FrameworkOp");
  353. if (!AttrUtils::SetInt(op, ATTR_NAME_FRAMEWORK_FWK_TYPE, domi::FrameworkType::TENSORFLOW)) {
  354. GELOGW("TranFrameOp SetInt ATTR_NAME_FRAMEWORK_FWK_TYPE failed");
  355. }
  356. }
  357. }
  358. }
  359. Status GraphOptimize::IdentifyReference(ComputeGraphPtr &compute_graph) {
  360. for (auto &node : compute_graph->GetAllNodes()) {
  361. GE_CHECK_NOTNULL(node);
  362. auto op_desc = node->GetOpDesc();
  363. GE_CHECK_NOTNULL(op_desc);
  364. auto input_name_index = op_desc->GetAllInputName();
  365. bool is_ref = false;
  366. for (const auto &name_index : input_name_index) {
  367. const int out_index = op_desc->GetOutputIndexByName(name_index.first);
  368. if (out_index != -1) {
  369. auto input_desc = op_desc->GetInputDesc(name_index.second);
  370. input_desc.SetRefPortByIndex({name_index.second});
  371. op_desc->UpdateInputDesc(name_index.second, input_desc);
  372. GELOGI("SetRefPort: set op[%s] input desc[%u-%s] ref.",
  373. op_desc->GetName().c_str(), name_index.second, name_index.first.c_str());
  374. auto output_desc = op_desc->GetOutputDesc(static_cast<uint32_t>(out_index));
  375. output_desc.SetRefPortByIndex({name_index.second});
  376. op_desc->UpdateOutputDesc(static_cast<uint32_t>(out_index), output_desc);
  377. GELOGI("SetRefPort: set op[%s] output desc[%u-%s] ref.",
  378. op_desc->GetName().c_str(), out_index, name_index.first.c_str());
  379. is_ref = true;
  380. }
  381. }
  382. if (is_ref) {
  383. AttrUtils::SetBool(op_desc, ATTR_NAME_REFERENCE, is_ref);
  384. GELOGI("param [node] %s is reference node, set attribute %s to be true.",
  385. node->GetName().c_str(), ATTR_NAME_REFERENCE.c_str());
  386. }
  387. }
  388. return SUCCESS;
  389. }
  390. Status GraphOptimize::OptimizeWholeGraph(ComputeGraphPtr &compute_graph) {
  391. if (compute_graph == nullptr) {
  392. REPORT_INNER_ERROR("E19999", "Param compute_graph is nullptr, check invalid");
  393. GELOGE(GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL, "[Check][Param] compute_graph is nullptr.");
  394. return GE_GRAPH_OPTIMIZE_COMPUTE_GRAPH_NULL;
  395. }
  396. std::shared_ptr<GELib> instance_ptr = ge::GELib::GetInstance();
  397. if (instance_ptr == nullptr || !instance_ptr->InitFlag()) {
  398. REPORT_INNER_ERROR("E19999", "Gelib not init before, check invalid, graph:%s.",
  399. compute_graph->GetName().c_str());
  400. GELOGE(GE_CLI_GE_NOT_INITIALIZED, "[Get][GELib] Gelib not init before, graph:%s.",
  401. compute_graph->GetName().c_str());
  402. return GE_CLI_GE_NOT_INITIALIZED;
  403. }
  404. auto graph_optimizer = instance_ptr->OpsKernelManagerObj().GetAllGraphOptimizerObjsByPriority();
  405. GELOGI("optimize by opskernel in OptimizeWholeGraph. num of graph_optimizer is %zu.", graph_optimizer.size());
  406. Status ret = SUCCESS;
  407. string exclude_core_type = (core_type_ == kVectorCore) ? kAicoreEngine : kVectorEngine;
  408. GELOGD("[OptimizeWholeGraph]: engine type will exclude: %s", exclude_core_type.c_str());
  409. if (!graph_optimizer.empty()) {
  410. for (auto &iter : graph_optimizer) {
  411. if (iter.first == exclude_core_type || iter.second == nullptr) {
  412. continue;
  413. }
  414. GELOGI("Begin to optimize whole graph by engine %s", iter.first.c_str());
  415. ret = iter.second->OptimizeWholeGraph(*compute_graph);
  416. GE_DUMP(compute_graph, "OptimizeWholeGraph" + iter.first);
  417. if (ret != SUCCESS) {
  418. REPORT_INNER_ERROR("E19999", "Call OptimizeWholeGraph failed, ret:%d, engine_name:%s, "
  419. "graph_name:%s", ret, iter.first.c_str(),
  420. compute_graph->GetName().c_str());
  421. GELOGE(ret, "[Call][OptimizeWholeGraph] failed, ret:%d, engine_name:%s, graph_name:%s",
  422. ret, iter.first.c_str(), compute_graph->GetName().c_str());
  423. return ret;
  424. }
  425. }
  426. }
  427. return ret;
  428. }
  429. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用,而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。