You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

permute_pass.cc 5.8 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/permute_pass.h"
  17. #include <queue>
  18. #include <vector>
  19. #include "common/debug/log.h"
  20. #include "common/types.h"
  21. #include "graph/utils/attr_utils.h"
  22. #include "graph/utils/op_desc_utils.h"
  23. #include "inc/kernel.h"
  24. #include "inc/kernel_factory.h"
  25. #include "framework/omg/omg_inner_types.h"
  26. #include "graph/common/local_context.h"
  27. using domi::DOMI_TENSOR_ND;
  28. using domi::DOMI_TENSOR_NHWC;
  29. using domi::SUCCESS;
  30. using domi::TENSORFLOW;
  31. namespace ge {
  32. Status PermutePass::Run(ComputeGraphPtr graph) {
  33. GE_CHECK_NOTNULL(graph);
  34. std::vector<NodePtr> isolate_nodes;
  35. for (NodePtr &node : graph->GetDirectNode()) {
  36. OpDescPtr op_desc_ptr = node->GetOpDesc();
  37. GE_CHECK_NOTNULL(op_desc_ptr);
  38. GE_IF_BOOL_EXEC(
  39. op_desc_ptr->GetType() == PERMUTE && GetLocalOmgContext().type == domi::TENSORFLOW,
  40. /// Input format 5D means NHWC in 4D way. So if input origin foramt is NCHW and
  41. /// permute paramter list is [0,3,1,2], this permute can be optimised.
  42. GE_IF_BOOL_EXEC(
  43. GetLocalOmgContext().format != DOMI_TENSOR_ND,
  44. // Get input origin foramt
  45. for (NodePtr &n
  46. : graph->GetDirectNode()) {
  47. GE_IF_BOOL_EXEC(
  48. n->GetOpDesc()->GetType() == PERMUTE, std::queue<NodePtr> q_node; q_node.push(n); bool jump_out = false;
  49. while (!q_node.empty()) {
  50. NodePtr n_temp = q_node.back();
  51. q_node.pop();
  52. for (auto &inNode : n_temp->GetInDataNodes()) {
  53. int64_t cur_format = 0;
  54. GE_IF_BOOL_EXEC(AttrUtils::GetInt(inNode->GetOpDesc(), ATTR_NAME_FORMAT, cur_format),
  55. GE_IF_BOOL_EXEC(!AttrUtils::SetInt(n->GetOpDesc(), "permute_src_format", cur_format),
  56. GELOGW("set permute_src_format failed");
  57. continue);
  58. jump_out = true; break);
  59. q_node.push(inNode);
  60. }
  61. GE_IF_BOOL_EXEC(jump_out, break);
  62. });
  63. }
  64. int64_t permute_src_format = 0;
  65. GE_IF_BOOL_EXEC(!AttrUtils::GetInt(op_desc_ptr, "permute_src_format", permute_src_format), continue);
  66. // Get dim_index_
  67. std::vector<int64_t> index_list; GE_CHK_BOOL_RET_STATUS(
  68. AttrUtils::GetListInt(op_desc_ptr, PERMUTE_ATTR_ORDER, index_list), INTERNAL_ERROR, "get index list failed");
  69. size_t index_size = index_list.size(); GE_IF_BOOL_EXEC(index_size == 0, continue);
  70. GE_IF_BOOL_EXEC(index_size == 4 && (permute_src_format == DOMI_TENSOR_NHWC && index_list.at(0) == 0 &&
  71. index_list.at(1) == 3 && index_list.at(2) == 1 && index_list.at(3) == 2),
  72. isolate_nodes.push_back(node);
  73. continue);
  74. int64_t conv_format = 0; GE_IF_BOOL_EXEC(
  75. index_size == 4 &&
  76. (index_list.at(0) == 0 && index_list.at(1) == 2 && index_list.at(2) == 3 && index_list.at(3) == 1),
  77. GE_IF_BOOL_EXEC(
  78. (node->GetOutDataNodesSize() > 0 && node->GetOutDataNodes().at(0) != nullptr &&
  79. node->GetOutDataNodes().at(0)->GetOpDesc() != nullptr) &&
  80. ((node->GetOutDataNodesSize() != 0 &&
  81. CONVOLUTION == node->GetOutDataNodes().at(0)->GetOpDesc()->GetType() &&
  82. AttrUtils::GetInt(node->GetOutDataNodes().at(0)->GetOpDesc(), ATTR_NAME_FORMAT, conv_format) &&
  83. conv_format == DOMI_TENSOR_NHWC) ||
  84. (node->GetOutDataNodesSize() != 0 &&
  85. node->GetOutDataNodes().at(0)->GetOpDesc()->GetType() == DEPCONVOLUTION) ||
  86. (node->GetOutDataNodesSize() != 0 &&
  87. node->GetOutDataNodes().at(0)->GetOpDesc()->GetType() == DECONVOLUTION) ||
  88. (node->GetOutDataNodesSize() != 0 && node->GetOutDataNodes().at(0)->GetOpDesc()->GetType() == PAD &&
  89. node->GetOutDataNodes().at(0)->GetOutDataNodesSize() != 0 &&
  90. node->GetOutDataNodes().at(0)->GetOutDataNodes().at(0) != nullptr &&
  91. node->GetOutDataNodes().at(0)->GetOutDataNodes().at(0)->GetOpDesc() != nullptr &&
  92. node->GetOutDataNodes().at(0)->GetOutDataNodes().at(0)->GetOpDesc()->GetType() == CONVOLUTION)),
  93. isolate_nodes.push_back(node);
  94. continue););););
  95. }
  96. GE_IF_BOOL_EXEC(
  97. isolate_nodes.size() != 0, for (auto &node
  98. : isolate_nodes) {
  99. // Adding an attribute indicates that the predecessor Permute has been deleted for the Builder to process.
  100. for (auto &outNode : node->GetOutDataNodes()) {
  101. OpDescPtr op_desc_ptr = outNode->GetOpDesc();
  102. GE_CHECK_NOTNULL(op_desc_ptr);
  103. if (!AttrUtils::SetBool(op_desc_ptr, ATTR_NAME_PRED_PERMUTE_DELETED, true)) {
  104. GELOGE(INTERNAL_ERROR, "set ATTR_NAME_PRED_PERMUTE_DELETED failed");
  105. return INTERNAL_ERROR;
  106. }
  107. }
  108. GE_RETURN_WITH_LOG_IF_ERROR(graph->RemoveNode(node), "[%s]:remove permute node failed",
  109. node->GetOpDesc()->GetName().c_str());
  110. });
  111. return SUCCESS;
  112. }
  113. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示