You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

dimension_compute_pass_unittest.cc 4.9 kB

5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/passes/dimension_compute_pass.h"
  17. #include <string>
  18. #include <vector>
  19. #include <gtest/gtest.h>
  20. #include "common/types.h"
  21. #include "graph/passes/base_pass.h"
  22. #include "graph_builder_utils.h"
  23. #include "inc/kernel.h"
  24. #include "inc/kernel_factory.h"
  25. namespace ge {
  26. namespace {
  27. const char *AddNYes = "AddNYes";
  28. const char *AddNNo = "AddNNo";
  29. const char *HuberLossYes = "HuberLossYes";
  30. const char *ShapeNo = "ShapeNo";
  31. const char *ShapeYes = "ShapeYes";
  32. const char *DataNo = "dataNo";
  33. } // namespace
  34. class UtestShapeYesKernel : public Kernel {
  35. public:
  36. Status Compute(const NodePtr &node, std::vector<GeTensorPtr> &v_output) override {
  37. auto output = std::make_shared<GeTensor>();
  38. std::vector<uint8_t> data{1, 2, 3};
  39. std::vector<int64_t> shape{3};
  40. output->MutableTensorDesc().SetShape(GeShape(shape));
  41. output->SetData(data);
  42. output->MutableTensorDesc().SetDataType(DT_UINT8);
  43. v_output.push_back(output);
  44. return SUCCESS;
  45. }
  46. };
  47. REGISTER_KERNEL(ShapeYes, UtestShapeYesKernel);
// Test fixture for the DimensionComputePass unit tests below.
// NOTE(review): the fixture name says "DimensionAdjustPass" but every test
// in this file runs DimensionComputePass — name likely copied from a
// sibling test file; confirm before renaming (all TEST_F users must follow).
class UtestGraphPassesDimensionAdjustPass : public testing::Test {
 protected:
  UtestGraphPassesDimensionAdjustPass() = default;
};
  52. namespace {
  53. /// netoutput1
  54. /// |
  55. /// shapeNo1
  56. /// |
  57. /// addnNo1
  58. /// / \.
  59. /// / \.
  60. /// const1 const2
  61. ComputeGraphPtr BuildGraph8() {
  62. auto builder = ut::GraphBuilder("test");
  63. auto const1 = builder.AddNode("const1", CONSTANT, 0, 1);
  64. auto const2 = builder.AddNode("const2", CONSTANT, 0, 1);
  65. auto addn1 = builder.AddNode("addn1", AddNNo, 2, 1);
  66. auto shape1 = builder.AddNode("shape1", ShapeNo, 1, 1);
  67. auto netoutput1 = builder.AddNode("netoutput", NETOUTPUT, 1, 0);
  68. builder.AddDataEdge(const1, 0, addn1, 0);
  69. builder.AddDataEdge(const2, 0, addn1, 1);
  70. builder.AddDataEdge(addn1, 0, shape1, 0);
  71. builder.AddDataEdge(shape1, 0, netoutput1, 0);
  72. return builder.GetGraph();
  73. }
  74. /// netoutput1
  75. /// |
  76. /// shapeNo1
  77. /// |
  78. /// addnYes1
  79. /// / \.
  80. /// / \.
  81. ///const1 data1
  82. ComputeGraphPtr BuildGraph9() {
  83. auto builder = ut::GraphBuilder("test");
  84. auto const1 = builder.AddNode("const1", CONSTANT, 0, 1);
  85. auto data1 = builder.AddNode("data1", DataNo, 0, 1);
  86. auto addn1 = builder.AddNode("addn1", AddNYes, 2, 1);
  87. auto shape1 = builder.AddNode("shape1", ShapeNo, 1, 1);
  88. auto netoutput1 = builder.AddNode("netoutput", NETOUTPUT, 1, 0);
  89. builder.AddDataEdge(const1, 0, addn1, 0);
  90. builder.AddDataEdge(data1, 0, addn1, 1);
  91. builder.AddDataEdge(addn1, 0, shape1, 0);
  92. builder.AddDataEdge(shape1, 0, netoutput1, 0);
  93. return builder.GetGraph();
  94. }
  95. /// netoutput1
  96. /// |
  97. /// shapeYes1
  98. /// |
  99. /// addnNo1
  100. ComputeGraphPtr BuildGraph1() {
  101. auto builder = ut::GraphBuilder("test");
  102. auto addnNo1 = builder.AddNode("addnNo1", AddNNo, 2, 1);
  103. auto shapeYes1 = builder.AddNode("shapeYes1", ShapeYes, 1, 1);
  104. auto netoutput1 = builder.AddNode("netoutput1", NETOUTPUT, 1, 0);
  105. builder.AddDataEdge(addnNo1, 0, shapeYes1, 0);
  106. builder.AddDataEdge(shapeYes1, 0, netoutput1, 0);
  107. return builder.GetGraph();
  108. }
  109. } // namespace
  110. TEST_F(UtestGraphPassesDimensionAdjustPass, not_changed_no_kernel) {
  111. auto graph = BuildGraph8();
  112. NamesToPass names_to_pass;
  113. names_to_pass.push_back({"Test", new DimensionComputePass});
  114. GEPass pass(graph);
  115. EXPECT_EQ(pass.Run(names_to_pass), SUCCESS);
  116. EXPECT_EQ(graph->GetAllNodes().size(), 5);
  117. for (auto &name_to_pass : names_to_pass) {
  118. delete name_to_pass.second;
  119. }
  120. }
  121. TEST_F(UtestGraphPassesDimensionAdjustPass, not_changed_no_compute_kernel) {
  122. auto graph = BuildGraph9();
  123. NamesToPass names_to_pass;
  124. names_to_pass.push_back({"Test", new DimensionComputePass});
  125. GEPass pass(graph);
  126. EXPECT_EQ(pass.Run(names_to_pass), SUCCESS);
  127. EXPECT_EQ(graph->GetAllNodes().size(), 5);
  128. for (auto &name_to_pass : names_to_pass) {
  129. delete name_to_pass.second;
  130. }
  131. }
  132. TEST_F(UtestGraphPassesDimensionAdjustPass, success) {
  133. auto graph = BuildGraph1();
  134. NamesToPass names_to_pass;
  135. names_to_pass.push_back({"Test", new DimensionComputePass});
  136. GEPass pass(graph);
  137. EXPECT_EQ(pass.Run(names_to_pass), SUCCESS);
  138. EXPECT_EQ(graph->GetAllNodes().size(), 2);
  139. for (auto &name_to_pass : names_to_pass) {
  140. delete name_to_pass.second;
  141. }
  142. }
  143. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示