You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

pass_utils.cc 12 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/pass_utils.h"
  17. #include <climits>
  18. #include <memory>
  19. #include <queue>
  20. #include <string>
  21. #include <vector>
  22. #include "framework/common/debug/ge_log.h"
  23. #include "common/ge_inner_error_codes.h"
  24. #include "common/ge/ge_util.h"
  25. #include "common/op/ge_op_utils.h"
  26. #include "common/types.h"
  27. #include "graph/common/omg_util.h"
  28. #include "graph/debug/ge_attr_define.h"
  29. #include "graph/ge_tensor.h"
  30. #include "graph/manager/graph_var_manager.h"
  31. #include "graph/utils/graph_utils.h"
  32. #include "graph/utils/op_desc_utils.h"
  33. #include "graph/utils/tensor_utils.h"
  34. #include "graph/utils/type_utils.h"
namespace ge {
namespace {
// Rank of a 1-D shape tensor built by the passes in this file.
const uint32_t kShapeDimSize = 1;
// Rank constant for passes that handle 2-D shapes.
const uint32_t DIM_SIZE_TWO = 2;
} // namespace
  40. Status PassUtils::ConstructTensorDescWithData(const GeTensorDesc &out_desc, std::vector<int64_t> &data,
  41. std::vector<GeTensorPtr> &v_output, const bool scalar_output) {
  42. Status ret = SUCCESS;
  43. const uint32_t dim_size = static_cast<uint32_t>(data.size());
  44. DataType data_type = out_desc.GetDataType();
  45. if (data_type == DT_INT32) {
  46. unique_ptr<int32_t[]> buf(new (std::nothrow) int32_t[dim_size]());
  47. if (buf == nullptr) {
  48. GELOGE(MEMALLOC_FAILED, "new failed");
  49. return MEMALLOC_FAILED;
  50. }
  51. for (uint32_t i = 0; i < dim_size; i++) {
  52. if (data[i] >= INT_MAX) {
  53. GELOGE(PARAM_INVALID, "int32 overflow, data[%u]:%ld", i, data[i]);
  54. return PARAM_INVALID;
  55. }
  56. buf[i] = static_cast<int32_t>(data[i]);
  57. }
  58. ret = ConstructTensorDescWithData(out_desc, buf.get(), dim_size, v_output, scalar_output);
  59. } else if (data_type == DT_INT64) {
  60. unique_ptr<int64_t[]> buf(new (std::nothrow) int64_t[dim_size]());
  61. if (buf == nullptr) {
  62. GELOGE(MEMALLOC_FAILED, "new failed");
  63. return MEMALLOC_FAILED;
  64. }
  65. for (uint32_t i = 0; i < dim_size; i++) {
  66. buf[i] = data[i];
  67. }
  68. ret = ConstructTensorDescWithData(out_desc, buf.get(), dim_size, v_output, scalar_output);
  69. } else {
  70. GELOGE(PARAM_INVALID, "Only support DT_INT32 and DT_INT64. data_type:%s",
  71. TypeUtils::DataTypeToSerialString(data_type).c_str());
  72. return PARAM_INVALID;
  73. }
  74. if (ret != SUCCESS) {
  75. GELOGE(ret, "GetShapeTensor failed.");
  76. return ret;
  77. }
  78. return SUCCESS;
  79. }
  80. template <typename T>
  81. Status PassUtils::ConstructTensorDescWithData(const GeTensorDesc &out_desc, T *buf, uint32_t len,
  82. std::vector<GeTensorPtr> &v_output, const bool scalar_output) {
  83. // construct TensorDesc
  84. GeShape out_shape = (scalar_output ? GeShape() : GeShape({len}));
  85. GeTensorDesc output_tensor_desc(out_desc);
  86. output_tensor_desc.SetShape(out_shape);
  87. GeTensorPtr output_tensor_ptr = MakeShared<GeTensor>(
  88. output_tensor_desc, reinterpret_cast<uint8_t *>(buf), sizeof(T) * len);
  89. if (output_tensor_ptr == nullptr) {
  90. GELOGE(MEMALLOC_FAILED, "Make shared failed");
  91. return MEMALLOC_FAILED;
  92. }
  93. v_output.push_back(output_tensor_ptr);
  94. return SUCCESS;
  95. }
  96. bool PassUtils::IsConstant(const ConstNodePtr &node) {
  97. if (node == nullptr) {
  98. GELOGE(PARAM_INVALID, "node is null");
  99. return false;
  100. }
  101. auto src_node_type = node->GetType();
  102. bool is_constant = (src_node_type == CONSTANT) || (src_node_type == CONSTANTOP);
  103. return is_constant;
  104. }
/// Propagates the weight of a constant `src_node` to the consumers reached
/// through `out_data_anchor`:
///   - each peer in-data anchor is unlinked and re-fed by a freshly added
///     constant op carrying the same weight (AddConstOpToAnchor);
///   - the new constant inherits src_node's exact type and src_node's incoming
///     control edges, so ordering constraints are preserved;
///   - peer in-control anchors are re-wired to src_node's control predecessors.
/// Returns SUCCESS immediately when src_node is not a constant (nothing to do).
Status PassUtils::SetOutNodeWeight(const OutDataAnchorPtr &out_data_anchor, const NodePtr &src_node) {
  GE_IF_BOOL_EXEC(src_node == nullptr, GELOGE(PARAM_INVALID, "src_node is null"); return PARAM_INVALID);
  if (!IsConstant(src_node)) {
    return SUCCESS;
  }
  auto weights = OpDescUtils::MutableWeights(src_node);
  if (weights.empty()) {
    return PARAM_INVALID;
  }
  // Only the first weight is propagated to consumers.
  auto weight = weights.at(0);
  auto src_in_ctrl = src_node->GetInControlAnchor();
  if ((src_in_ctrl == nullptr) || (out_data_anchor == nullptr)) {
    GELOGE(FAILED, "parameter is null.");
    return FAILED;
  }
  // Control-flow predecessors of the constant being replaced.
  auto src_out_control_anchors = src_in_ctrl->GetPeerAnchors();
  for (const auto &dst_in_data : out_data_anchor->GetPeerInDataAnchors()) {
    auto dst_node = dst_in_data->GetOwnerNode();
    auto dst_op_desc = dst_node->GetOpDesc();
    if (dst_op_desc == nullptr) {
      continue;
    }
    // Mark the consumer's input slot as constant so later passes can fold it.
    std::vector<bool> is_input_const = dst_op_desc->GetIsInputConst();
    auto input_index = static_cast<size_t>(dst_in_data->GetIdx());
    if (input_index < is_input_const.size()) {
      is_input_const[input_index] = true;
      dst_op_desc->SetIsInputConst(is_input_const);
    }
    // Replace the data edge with a dedicated constant feeding this consumer.
    GE_CHK_STATUS_RET(GraphUtils::RemoveEdge(out_data_anchor, dst_in_data), "remove edge failed");
    graphStatus ret = OpDescUtils::AddConstOpToAnchor(dst_in_data, weight);
    if (ret != SUCCESS) {
      return ret;
    }
    GE_CHECK_NOTNULL(dst_in_data->GetPeerOutAnchor());
    auto dynamic_const_node = dst_in_data->GetPeerOutAnchor()->GetOwnerNode();
    GE_CHECK_NOTNULL(dynamic_const_node->GetOpDesc());
    // Keep the original constant's exact type (CONSTANT vs CONSTANTOP).
    dynamic_const_node->GetOpDesc()->SetType(src_node->GetType());
    // restore control inputs to dynamically added constant ops, if any
    for (const auto &src_out_control_anchor : src_out_control_anchors) {
      GE_CHK_STATUS_RET(GraphUtils::AddEdge(src_out_control_anchor, dynamic_const_node->GetInControlAnchor()),
                        "add edge failed");
    }
  }
  /// Before:
  /// Op1 - - - > Constant ------> Switch - - - > Op2
  /// After:
  /// Op1 - - - > Op2
  for (const auto &dst_in_ctrl : out_data_anchor->GetPeerInControlAnchors()) {
    for (const auto &src_out_control_anchor : src_out_control_anchors) {
      GE_CHK_STATUS_RET(GraphUtils::AddEdge(src_out_control_anchor, dst_in_ctrl), "add edge failed");
    }
  }
  return SUCCESS;
}
  159. Status PassUtils::RemoveBranch(const NodePtr &node, std::vector<NodePtr> &delete_nodes,
  160. std::vector<NodePtr> &end_nodes) {
  161. if (node == nullptr) {
  162. GELOGE(FAILED, "parameter is null.");
  163. return FAILED;
  164. }
  165. GELOGI("Remove branch starting from node %s", node->GetName().c_str());
  166. std::queue<NodePtr> search_queue;
  167. search_queue.push(node);
  168. while (!search_queue.empty()) {
  169. const NodePtr src_node = search_queue.front();
  170. if (src_node == nullptr) {
  171. continue;
  172. }
  173. delete_nodes.push_back(src_node);
  174. search_queue.pop();
  175. for (const auto &src_out_anchor : src_node->GetAllOutAnchors()) {
  176. for (const auto &dst_in_anchor : src_out_anchor->GetPeerAnchors()) {
  177. if (dst_in_anchor == nullptr) {
  178. continue;
  179. }
  180. auto dst_node = dst_in_anchor->GetOwnerNode();
  181. std::string node_type;
  182. GE_CHK_STATUS_RET(GetOriginalType(dst_node, node_type), "get original type failed");
  183. if (node_type == NETOUTPUT) {
  184. if (dst_in_anchor->IsTypeOf<InDataAnchor>()) {
  185. GELOGE(INTERNAL_ERROR,
  186. "[%s] Inactive branch connected to "
  187. "NetOutput with data anchor.",
  188. node->GetName().c_str());
  189. return INTERNAL_ERROR;
  190. } else {
  191. // safe to unlink control edges
  192. GE_CHK_STATUS_RET(GraphUtils::RemoveEdge(src_out_anchor, dst_in_anchor), "remove edge failed");
  193. end_nodes.push_back(dst_node);
  194. }
  195. } else if (node_type == MERGE) {
  196. /// Unlink connection between the inactive branch and Merge/NetOutput.
  197. /// The removal of inactive nodes will be handled in PrunePass
  198. GE_CHK_STATUS_RET(GraphUtils::RemoveEdge(src_out_anchor, dst_in_anchor), "remove edge failed");
  199. end_nodes.push_back(dst_node);
  200. GELOGD("Reach the end merge node %s, the branch removing stop", dst_node->GetName().c_str());
  201. } else {
  202. search_queue.push(dst_node);
  203. }
  204. }
  205. }
  206. }
  207. return SUCCESS;
  208. }
  209. NodePtr PassUtils::GetInDataNode(const ConstNodePtr &node, int index) {
  210. if (node == nullptr) {
  211. return nullptr;
  212. }
  213. auto in_data_anchor = node->GetInDataAnchor(index);
  214. if (in_data_anchor == nullptr) {
  215. return nullptr;
  216. }
  217. auto peer_out_data_anchor = in_data_anchor->GetPeerOutAnchor();
  218. if (peer_out_data_anchor == nullptr) {
  219. return nullptr;
  220. }
  221. auto src_node = peer_out_data_anchor->GetOwnerNode();
  222. return src_node;
  223. }
  224. bool PassUtils::IsNeedTrainIteFlowCtrl(const ComputeGraphPtr &compute_graph) {
  225. if (compute_graph == nullptr) {
  226. return false;
  227. }
  228. if (compute_graph->GetParentGraph() != nullptr) {
  229. GELOGI("Subgraph %s no need flow ctrl.", compute_graph->GetName().c_str());
  230. return false;
  231. }
  232. if (GraphUtils::IsUnknownShapeGraph(compute_graph)) {
  233. GELOGI("Unknown shape graph %s no need flow ctrl.", compute_graph->GetName().c_str());
  234. return false;
  235. }
  236. if (!ge::VarManager::Instance(compute_graph->GetSessionID())->IsVarExist(NODE_NAME_FLOWCTRL_LOOP_PER_ITER)) {
  237. return false;
  238. }
  239. return compute_graph->GetNeedIteration();
  240. }
  241. int PassUtils::GetUniqueInDataAnchorIndex(const NodePtr &node_ptr) {
  242. const int invalid_index = -1;
  243. if (node_ptr == nullptr) {
  244. GELOGE(INTERNAL_ERROR, "GetUniqueInDataAnchorIndex: node is null");
  245. return invalid_index;
  246. }
  247. for (const auto &in_anchor : node_ptr->GetAllInDataAnchors()) {
  248. if ((in_anchor != nullptr) && (in_anchor->GetPeerOutAnchor() != nullptr) &&
  249. (in_anchor->GetPeerOutAnchor()->GetOwnerNode() != nullptr)) {
  250. return (in_anchor->GetIdx());
  251. }
  252. }
  253. GELOGE(INTERNAL_ERROR,
  254. "GetUniqueInDataAnchorIndex: [%s] failed to find "
  255. "in data anchor with a valid peer out node",
  256. node_ptr->GetName().c_str());
  257. return invalid_index;
  258. }
  259. Status PassUtils::UnlinkNodeWithControlCopy(NodePtr &node, int index) {
  260. if (node == nullptr) {
  261. GELOGE(PARAM_INVALID, "node is null.");
  262. return PARAM_INVALID;
  263. }
  264. auto in_data_anchor = node->GetInDataAnchor(index);
  265. if (in_data_anchor == nullptr) {
  266. GELOGW("[%s] in_data_anchor is null with index [%d].", node->GetName().c_str(), index);
  267. return SUCCESS;
  268. }
  269. auto out_data_anchor = in_data_anchor->GetPeerOutAnchor();
  270. if (out_data_anchor == nullptr) {
  271. GELOGE(FAILED, "[%s] peer out_data_anchor is null with index [%d].", node->GetName().c_str(), index);
  272. return FAILED;
  273. }
  274. // Remove link between father_node and node
  275. in_data_anchor->UnlinkAll();
  276. auto father_node = out_data_anchor->GetOwnerNode();
  277. // link father_node's in control nodes to node
  278. if (GraphUtils::CopyInCtrlEdges(father_node, node) != GRAPH_SUCCESS) {
  279. return FAILED;
  280. }
  281. return SUCCESS;
  282. }
  283. Status PassUtils::RemoveInactiveBranchToMerge(const OutDataAnchorPtr &inactive_output_anchor,
  284. std::vector<NodePtr> &delete_nodes, std::vector<NodePtr> &end_nodes) {
  285. if (inactive_output_anchor == nullptr) {
  286. GELOGE(FAILED, "parameter is null.");
  287. return FAILED;
  288. }
  289. for (const auto &dst_anchor : inactive_output_anchor->GetPeerAnchors()) {
  290. if (dst_anchor == nullptr) {
  291. continue;
  292. }
  293. auto dst_node = dst_anchor->GetOwnerNode();
  294. if (dst_node != nullptr) {
  295. std::string dst_node_type;
  296. GE_CHK_STATUS_RET(GetOriginalType(dst_node, dst_node_type), "get original type failed");
  297. if (dst_node_type == MERGE) {
  298. GELOGD("[%s] Switch connected directly to Merge", inactive_output_anchor->GetOwnerNode()->GetName().c_str());
  299. GE_CHK_STATUS_RET(GraphUtils::RemoveEdge(inactive_output_anchor, dst_anchor), "remove edge failed");
  300. continue;
  301. }
  302. Status ret = PassUtils::RemoveBranch(dst_node, delete_nodes, end_nodes);
  303. if (ret != SUCCESS) {
  304. return ret;
  305. }
  306. }
  307. }
  308. return SUCCESS;
  309. }
  310. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示