You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

dynamic_shape_partition_unittest.cc 11 kB

4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include <gtest/gtest.h>
  17. #define private public
  18. #define protected public
  19. #include "graph/partition/dynamic_shape_partition.h"
  20. #include "compute_graph.h"
  21. #include "graph/compute_graph_impl.h"
  22. #include "inc/framework/common/types.h"
  23. #include "utils/graph_utils.h"
  24. #include "graph/debug/ge_attr_define.h"
  25. #include "graph/common/omg_util.h"
  26. namespace ge {
  27. namespace {
  28. GeTensorDescPtr CreateTensorDesc(std::initializer_list<int64_t> shape, Format format = FORMAT_NCHW,
  29. DataType data_type = DT_FLOAT) {
  30. GeShape ge_shape{vector<int64_t>(shape)};
  31. GeTensorDescPtr tensor_desc = std::make_shared<GeTensorDesc>();
  32. tensor_desc->SetShape(ge_shape);
  33. tensor_desc->SetFormat(format);
  34. tensor_desc->SetDataType(data_type);
  35. return tensor_desc;
  36. }
  37. class NodeBuilder {
  38. public:
  39. NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared<OpDesc>(name, type); }
  40. NodeBuilder &AddInputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
  41. DataType data_type = DT_FLOAT) {
  42. op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
  43. return *this;
  44. }
  45. NodeBuilder &AddOutputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
  46. DataType data_type = DT_FLOAT) {
  47. op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
  48. return *this;
  49. }
  50. NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) {
  51. op_desc_->AddOutputDesc(tensor_desc->Clone());
  52. return *this;
  53. }
  54. NodePtr Build(const ComputeGraphPtr &graph) {
  55. NodePtr node = graph->AddNode(op_desc_);
  56. return node;
  57. }
  58. private:
  59. OpDescPtr op_desc_;
  60. };
  61. } // namespace
// Test fixture for DynamicShapePartitioner unit tests.
// No per-test shared state is needed, so SetUp/TearDown are intentionally empty.
class UtestDynamicShapePartition : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};
  67. TEST_F(UtestDynamicShapePartition, single_op_scene_success) {
  68. ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");
  69. NodePtr node1 =
  70. NodeBuilder("node1", CONSTANTOP).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  71. NodePtr add_n_node =
  72. NodeBuilder("add_n_node", ADDN).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  73. NodePtr node2 =
  74. NodeBuilder("node2", RELU).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  75. GraphUtils::AddEdge(node1->GetOutDataAnchor(0), add_n_node->GetInDataAnchor(0));
  76. GraphUtils::AddEdge(add_n_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0));
  77. (void)AttrUtils::SetBool(add_n_node->GetOpDesc(), ATTR_SINGLE_OP_SCENE, true);
  78. DynamicShapePartitioner partitioner(graph);
  79. EXPECT_EQ(partitioner.Partition(), SUCCESS);
  80. }
  81. /*******************************************************************************
  82. * |
  83. * Merge1
  84. * Active / \ Active
  85. * / \.
  86. * / \.
  87. * Merge2 \.
  88. * Active/ \Active \.
  89. * / \ \.
  90. * Add Sub Relu
  91. * | | |
  92. * | | |
  93. * Switch_f2 Switch_t2 |
  94. * \ / |
  95. * \ / |
  96. * Less2 |
  97. * | |
  98. * | |
  99. * Switch_f Switch_t
  100. * | \ / |
  101. * | Active |
  102. * | | |
  103. * | Less1 |
  104. * | / \ |
  105. * | / \ |
  106. * Data Data
  107. ******************************************************************************/
// Verifies that nodes tagged into control-flow groups (two nested Switch/Merge
// regions, see the diagram above) are partitioned into the expected number of
// subgraphs. The topology is built exactly as drawn: an inner region around
// merge2 (add2/sub2 branches) nested inside an outer region around merge1
// (merge2 result vs. relu1 branch).
TEST_F(UtestDynamicShapePartition, merge_control_flow_group) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");
  AttrUtils::SetStr(*graph, ATTR_NAME_SESSION_GRAPH_ID, "session_graph_id");

  // --- Node creation (all tensors are 1-D of size 1) ---
  auto data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto less1 = NodeBuilder("less1", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto active1 = NodeBuilder("active1", STREAMACTIVE).Build(graph);
  auto switch_t = NodeBuilder("switch_t", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
  auto switch_f = NodeBuilder("switch_f", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
  auto const_01 = NodeBuilder("const_01", CONSTANT).AddOutputDesc({1}).Build(graph);
  auto const_11 = NodeBuilder("const_11", CONSTANT).AddOutputDesc({1}).Build(graph);
  auto less2 = NodeBuilder("less2", LESS).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto active2 = NodeBuilder("active2", STREAMACTIVE).Build(graph);
  auto switch_t2 = NodeBuilder("switch_t2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
  auto switch_f2 = NodeBuilder("switch_f2", STREAMSWITCH).AddInputDesc({1}).AddInputDesc({1}).Build(graph);
  auto const_02 = NodeBuilder("const_02", CONSTANT).AddOutputDesc({1}).Build(graph);
  auto const_12 = NodeBuilder("const_12", CONSTANT).AddOutputDesc({1}).Build(graph);
  auto add2 = NodeBuilder("add2", ADD).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto sub2 = NodeBuilder("sub2", SUB).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto merge2 = NodeBuilder("merge2", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto active_f2 = NodeBuilder("active_f2", STREAMACTIVE).Build(graph);
  auto active_t2 = NodeBuilder("active_t2", STREAMACTIVE).Build(graph);
  auto relu1 = NodeBuilder("relu1", RELU).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto merge1 = NodeBuilder("merge1", STREAMMERGE).AddInputDesc({1}).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  auto active_f1 = NodeBuilder("active_f1", STREAMACTIVE).Build(graph);
  auto active_t1 = NodeBuilder("active_t1", STREAMACTIVE).Build(graph);
  auto output1 = NodeBuilder("noutput1", NETOUTPUT).AddInputDesc({1}).Build(graph);

  // --- Outer condition: less1 feeds the outer Switch pair via active1 ---
  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less1->GetInDataAnchor(0));
  GraphUtils::AddEdge(data2->GetOutDataAnchor(0), less1->GetInDataAnchor(1));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_t->GetInDataAnchor(0));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), switch_f->GetInDataAnchor(0));
  GraphUtils::AddEdge(const_01->GetOutDataAnchor(0), switch_t->GetInDataAnchor(1));
  GraphUtils::AddEdge(const_11->GetOutDataAnchor(0), switch_f->GetInDataAnchor(1));
  GraphUtils::AddEdge(less1->GetOutControlAnchor(), active1->GetInControlAnchor());
  GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_t->GetInControlAnchor());
  GraphUtils::AddEdge(active1->GetOutControlAnchor(), switch_f->GetInControlAnchor());

  // --- Inner condition: less2 feeds the inner Switch pair via active2 ---
  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), less2->GetInDataAnchor(0));
  GraphUtils::AddEdge(less1->GetOutDataAnchor(0), less2->GetInDataAnchor(1));
  GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(0));
  GraphUtils::AddEdge(less2->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(0));
  GraphUtils::AddEdge(const_02->GetOutDataAnchor(0), switch_t2->GetInDataAnchor(1));
  GraphUtils::AddEdge(const_12->GetOutDataAnchor(0), switch_f2->GetInDataAnchor(1));
  GraphUtils::AddEdge(less2->GetOutControlAnchor(), active2->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_t2->GetInControlAnchor());
  GraphUtils::AddEdge(active2->GetOutControlAnchor(), switch_f2->GetInControlAnchor());

  // --- Inner branches: add2 (false path) and sub2 (true path) into merge2 ---
  GraphUtils::AddEdge(switch_f2->GetOutControlAnchor(), add2->GetInControlAnchor());
  GraphUtils::AddEdge(less2->GetOutDataAnchor(0), add2->GetInDataAnchor(0));
  GraphUtils::AddEdge(add2->GetOutDataAnchor(0), merge2->GetInDataAnchor(0));
  GraphUtils::AddEdge(add2->GetOutControlAnchor(), active_f2->GetInControlAnchor());
  GraphUtils::AddEdge(active_f2->GetOutControlAnchor(), merge2->GetInControlAnchor());
  GraphUtils::AddEdge(switch_t2->GetOutControlAnchor(), sub2->GetInControlAnchor());
  GraphUtils::AddEdge(less2->GetOutDataAnchor(0), sub2->GetInDataAnchor(0));
  GraphUtils::AddEdge(sub2->GetOutDataAnchor(0), merge2->GetInDataAnchor(1));
  GraphUtils::AddEdge(sub2->GetOutControlAnchor(), active_t2->GetInControlAnchor());
  GraphUtils::AddEdge(active_t2->GetOutControlAnchor(), merge2->GetInControlAnchor());

  // --- Outer branches: inner region (true path) and relu1 (false path) into merge1 ---
  GraphUtils::AddEdge(switch_t->GetOutControlAnchor(), less2->GetInControlAnchor());
  GraphUtils::AddEdge(switch_f->GetOutControlAnchor(), relu1->GetInControlAnchor());
  GraphUtils::AddEdge(merge2->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(merge2->GetOutControlAnchor(), active_f1->GetInControlAnchor());
  GraphUtils::AddEdge(active_f1->GetOutControlAnchor(), merge1->GetInControlAnchor());
  // NOTE(review): relu1 declares only one input desc, yet this edge targets in-anchor 1 —
  // confirm whether AddEdge on a missing anchor is an intended no-op here.
  GraphUtils::AddEdge(data2->GetOutDataAnchor(0), relu1->GetInDataAnchor(1));
  // NOTE(review): merge1's in-anchor 0 was already fed by merge2 above; presumably this
  // was meant to be GetInDataAnchor(1) — verify against the diagram.
  GraphUtils::AddEdge(relu1->GetOutDataAnchor(0), merge1->GetInDataAnchor(0));
  GraphUtils::AddEdge(relu1->GetOutControlAnchor(), active_t1->GetInControlAnchor());
  GraphUtils::AddEdge(active_t1->GetOutControlAnchor(), merge1->GetInControlAnchor());
  GraphUtils::AddEdge(merge1->GetOutDataAnchor(0), output1->GetInDataAnchor(0));

  // Force the inner merge into unknown-shape handling, then fix node IDs via topo sort.
  AttrUtils::SetBool(merge2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  EXPECT_EQ(graph->TopologicalSorting(), GRAPH_SUCCESS);

  // Tag the inner control-flow region with merge2's id ...
  SetControlFlowGroup(merge2, merge2->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_f2, merge2->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_t2, merge2->GetOpDesc()->GetId());
  SetControlFlowGroup(active2, merge2->GetOpDesc()->GetId());
  SetControlFlowGroup(active_t2, merge2->GetOpDesc()->GetId());
  SetControlFlowGroup(active_f2, merge2->GetOpDesc()->GetId());
  // ... and the outer region with merge1's id.
  SetControlFlowGroup(merge1, merge1->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_f, merge1->GetOpDesc()->GetId());
  SetControlFlowGroup(switch_t, merge1->GetOpDesc()->GetId());
  SetControlFlowGroup(active1, merge1->GetOpDesc()->GetId());
  SetControlFlowGroup(active_f1, merge1->GetOpDesc()->GetId());
  SetControlFlowGroup(active_t1, merge1->GetOpDesc()->GetId());

  EXPECT_EQ(graph->impl_->sub_graph_.size(), 0);
  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);
  EXPECT_EQ(graph->impl_->sub_graph_.size(), 3);  // input, less1, unknown
}
  192. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示