
dynamic_shape_partition_unittest.cc 4.8 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#define private public
#define protected public
#include "graph/partition/dynamic_shape_partition.h"
#include "compute_graph.h"
#include "graph/compute_graph_impl.h"
#include "inc/framework/common/types.h"
#include "utils/graph_utils.h"
#include "graph/debug/ge_attr_define.h"

namespace ge {
namespace {
GeTensorDescPtr CreateTensorDesc(std::initializer_list<int64_t> shape, Format format = FORMAT_NCHW,
                                 DataType data_type = DT_FLOAT) {
  GeShape ge_shape{vector<int64_t>(shape)};
  GeTensorDescPtr tensor_desc = std::make_shared<GeTensorDesc>();
  tensor_desc->SetShape(ge_shape);
  tensor_desc->SetFormat(format);
  tensor_desc->SetDataType(data_type);
  return tensor_desc;
}

class NodeBuilder {
 public:
  NodeBuilder(const std::string &name, const std::string &type) { op_desc_ = std::make_shared<OpDesc>(name, type); }

  NodeBuilder &AddInputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
                            DataType data_type = DT_FLOAT) {
    op_desc_->AddInputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
    return *this;
  }

  NodeBuilder &AddOutputDesc(std::initializer_list<int64_t> shape = {1, 1, 224, 224}, Format format = FORMAT_NCHW,
                             DataType data_type = DT_FLOAT) {
    op_desc_->AddOutputDesc(CreateTensorDesc(shape, format, data_type)->Clone());
    return *this;
  }

  NodeBuilder &AddOutputDesc(GeTensorDescPtr tensor_desc) {
    op_desc_->AddOutputDesc(tensor_desc->Clone());
    return *this;
  }

  NodePtr Build(const ComputeGraphPtr &graph) {
    NodePtr node = graph->AddNode(op_desc_);
    return node;
  }

 private:
  OpDescPtr op_desc_;
};
}  // namespace

class UtestDynamicShapePartition : public testing::Test {
 protected:
  void SetUp() {}
  void TearDown() {}
};

TEST_F(UtestDynamicShapePartition, single_op_scene_success) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");

  NodePtr node1 =
      NodeBuilder("node1", CONSTANTOP).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  NodePtr add_n_node =
      NodeBuilder("add_n_node", ADDN).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  NodePtr node2 =
      NodeBuilder("node2", RELU).AddInputDesc({1, 1, 224, 224}).AddOutputDesc({1, 1, 224, 224}).Build(graph);
  GraphUtils::AddEdge(node1->GetOutDataAnchor(0), add_n_node->GetInDataAnchor(0));
  GraphUtils::AddEdge(add_n_node->GetOutDataAnchor(0), node2->GetInDataAnchor(0));

  (void)AttrUtils::SetBool(add_n_node->GetOpDesc(), ATTR_SINGLE_OP_SCENE, true);

  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);
}

TEST_F(UtestDynamicShapePartition, merge_control_flow_group) {
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("default");
  AttrUtils::SetStr(*graph, ATTR_NAME_SESSION_GRAPH_ID, "session_graph_id");

  NodePtr data1 = NodeBuilder("data1", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  NodePtr data2 = NodeBuilder("data2", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  NodePtr merge = NodeBuilder("node2", MERGE).AddInputDesc({1}).AddInputDesc({1})
                      .AddOutputDesc({1}).AddOutputDesc({}).Build(graph);
  GraphUtils::AddEdge(data1->GetOutDataAnchor(0), merge->GetInDataAnchor(0));
  GraphUtils::AddEdge(data2->GetOutDataAnchor(0), merge->GetInDataAnchor(1));

  (void)AttrUtils::SetBool(data1->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(data1->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
  (void)AttrUtils::SetBool(data2->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(data2->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);
  (void)AttrUtils::SetBool(merge->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);
  (void)AttrUtils::SetInt(merge->GetOpDesc(), ATTR_NAME_CONTROL_FLOW_GROUP, 3);

  EXPECT_EQ(graph->impl_->sub_graph_.size(), 0);

  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);

  EXPECT_EQ(graph->impl_->sub_graph_.size(), 1);
}
}  // namespace ge
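Both test cases above follow the same pattern: build a ComputeGraph, create nodes with the NodeBuilder helper, wire their data anchors with GraphUtils::AddEdge, set the attributes the partitioner inspects, then run DynamicShapePartitioner::Partition and check the result. The condensed sketch below restates that flow with comments; it reuses only identifiers already present in this file, and the node names, op types, and shapes chosen here are illustrative rather than taken from the tests.

  // Illustrative condensation of the test pattern above; names and shapes are arbitrary.
  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("demo");

  // 1. Build nodes with the NodeBuilder helper and connect their data anchors.
  NodePtr src = NodeBuilder("demo_data", DATA).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  NodePtr act = NodeBuilder("demo_relu", RELU).AddInputDesc({1}).AddOutputDesc({1}).Build(graph);
  GraphUtils::AddEdge(src->GetOutDataAnchor(0), act->GetInDataAnchor(0));

  // 2. Mark the attribute the partitioner keys on (here: force-unknown-shape).
  (void)AttrUtils::SetBool(act->GetOpDesc(), ATTR_NAME_FORCE_UNKNOWN_SHAPE, true);

  // 3. Partition the graph and verify the pass reports SUCCESS.
  DynamicShapePartitioner partitioner(graph);
  EXPECT_EQ(partitioner.Partition(), SUCCESS);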

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of in-depth graph optimizations, and finally produces a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the figure below.