
graph_mem_assigner.h 6.8 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_GRAPH_BUILD_MEMORY_GRAPH_MEM_ASSIGNER_H_
#define GE_GRAPH_BUILD_MEMORY_GRAPH_MEM_ASSIGNER_H_

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "framework/common/ge_inner_error_codes.h"
#include "graph/node.h"
#include "runtime/mem.h"
#include "graph/build/memory/hybrid_mem_assigner.h"

namespace ge {
struct MemoryOffset {
  MemoryOffset(rtMemType_t mem_type, size_t mem_offset) : mem_type_(mem_type), mem_offset_(mem_offset) {}

 public:
  rtMemType_t mem_type_;
  size_t mem_offset_;
};

using MemoryOffsetList = vector<MemoryOffset>;

class VariableMemoryAssigner {
 public:
  explicit VariableMemoryAssigner(ge::ComputeGraphPtr compute_graph) : compute_graph_(std::move(compute_graph)) {}

  VariableMemoryAssigner(const VariableMemoryAssigner &) = delete;
  VariableMemoryAssigner &operator=(const VariableMemoryAssigner &) = delete;
  virtual ~VariableMemoryAssigner() = default;

  ///
  /// @ingroup ge_graph
  /// @brief assign memory offset
  /// @return Status result of function
  ///
  ge::Status Assign();

  ///
  /// @ingroup ge_graph
  /// @brief assign variable attr to nodes
  /// @return Status result of function
  ///
  ge::Status AssignVarAttr2Nodes();

 private:
  ge::ComputeGraphPtr compute_graph_;
};

using VariableMemoryAssignerPtr = std::shared_ptr<VariableMemoryAssigner>;
using BlockMemAssignerPtr = std::shared_ptr<BlockMemAssigner>;
using HybridMemAssignerPtr = std::shared_ptr<HybridMemAssigner>;

class GraphMemoryAssigner {
 public:
  explicit GraphMemoryAssigner(ge::ComputeGraphPtr compute_graph)
      : compute_graph_(std::move(compute_graph)), mem_assigner_(nullptr) {}

  GraphMemoryAssigner(const GraphMemoryAssigner &) = delete;
  GraphMemoryAssigner &operator=(const GraphMemoryAssigner &) = delete;
  virtual ~GraphMemoryAssigner() = default;

  ///
  /// @ingroup ge_graph
  /// @brief assign memory offset
  /// @return Status result of function
  ///
  ge::Status AssignMemory();

  ///
  /// @ingroup ge_graph
  /// @brief assign variable attr to nodes,
  /// must be called after all memory assigned.
  /// @return Status result of function
  ///
  ge::Status AssignVarAttr2Nodes();

  ge::Status ReAssignMemory(bool is_loop_graph, size_t &mem_offset);

  ge::Status AssignZeroCopyMemory(size_t &mem_offset, size_t &zero_mem_copy_size);

  ge::Status SetInputOffset();

  ge::Status UpdateOpInputOffset(const NodePtr &node) const;

  ge::Status CheckOffset();

  ge::Status AssignReferenceMemory();

 private:
  ///
  /// @ingroup ge_graph
  /// @brief assign memory offset
  /// @return Status result of function
  ///
  ge::Status ReAssignContinuousMemory(bool is_loop_graph);

  ge::Status ReAssignReuseAndNoPaddingContinuousInputMemory();

  ge::Status ReAssignReuseAndNoPaddingContinuousOutputMemory();

  ge::Status ReAssignVirtualInputNodeMemory(NodePtr node, size_t &mem_offset_reuse);

  ge::Status ReAssignVirtualOutputNodeMemory(NodePtr node, size_t &mem_offset_reuse);

  ge::Status ReAssignVirtualNodesMemory(map<string, vector<NodePtr>> &mem_reuse_nodes_map, int32_t mem_reuse_model);

  ge::Status GetMaxBatchLabel(const map<string, vector<NodePtr>> &mem_reuse_virtual_nodes_map, int32_t mem_reuse_model,
                              string &max_batch_label);

  ge::Status CalculateTensorRealSizeAndOutSize(const ge::ConstGeTensorDescPtr &output_desc, int64_t dim_index,
                                               int64_t &output_mem_size, int64_t &batch_dim_num, int64_t &out_size);

  ge::Status ReAssignAtomicMemory(bool is_loop_graph);

  ge::Status FilterAtomicNodesForMemoryAssign(std::map<NodePtr, vector<NodePtr>> &normal_atomic_nodes_map,
                                              std::vector<NodePtr> &connecting_output_atomic_nodes);

  ge::Status AssignContinuousInputMemory(const ge::NodePtr &node, int64_t &continuous_mem_start,
                                         int64_t &continuous_mem_size);

  ge::Status AssignContinuousOutputMemory(const ge::NodePtr &node);

  ///
  /// @brief check the input of node whether support atomic attr
  /// @param node
  /// @return true:supported; false:not supported
  ///
  bool CheckInputIsSupportAtomic(const ge::NodePtr &node);

  ge::Status GetMemoryAssignmentStatus(const ge::NodePtr &node, int64_t output_index, bool &is_mem_assigned);

  ge::Status AssignAtomicOutputMemory(const ge::NodePtr &node, std::vector<int64_t> &mem_offset_end);

  ge::Status AssignOrdinaryAtomicWorkspaceMemory(const ge::OpDescPtr &op_desc,
                                                 std::map<std::string, std::map<int64_t, int64_t>> &workspace_info,
                                                 std::vector<int64_t> &mem_offset_end);

  ge::Status AssignFusionAtomicWorkspaceMemory(const ge::OpDescPtr &op_desc,
                                               std::map<std::string, std::map<int64_t, int64_t>> &workspace_info,
                                               std::vector<int64_t> &mem_offset_end);

  ge::Status AssignAtomicOutputAndWorkspaceMemory(const ge::NodePtr &node, std::vector<int64_t> &mem_offset_end);

  ge::Status AssignConnectNetOutputAtomicMemory(vector<NodePtr> &connect_netoutput_nodes);

  ge::Status SetIndependentAtomicAttr(const ge::NodePtr &node, int64_t atomic_mem_start,
                                      const std::vector<int64_t> &mem_offset_end);

  ge::Status SetAtomicCleanAttr(const ge::NodePtr &node, const std::vector<int64_t> &atomic_mem_start,
                                const std::vector<int64_t> &atomic_mem_size);

  ge::Status IsIndependentAtomicClean(const ge::NodePtr &node, bool &is_independent_atomic_clean_node);

  void AlignMemOffset(const int64_t &mem_align_size);

  ge::Status UpdateOpInputOffset(const NodePtr &node, vector<int64_t> &input_list) const;

  ge::Status UpdateConstArgsOffset(const NodePtr &node, vector<int64_t> &input_list) const;

  NodePtr GetKnownInputNode(const NodePtr &node) const;

  MemoryOffsetList memory_offset_;
  ge::ComputeGraphPtr compute_graph_;
  HybridMemAssignerPtr mem_assigner_;
};
}  // namespace ge

#endif  // GE_GRAPH_BUILD_MEMORY_GRAPH_MEM_ASSIGNER_H_
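The header only declares the assigner interfaces. A minimal caller-side sketch is shown below; it is not part of the repository and only illustrates the call order implied by the declarations and doc comments (in particular, that AssignVarAttr2Nodes is meant to run after all memory has been assigned). AssignGraphMemory is a hypothetical wrapper, and the way the ComputeGraphPtr is obtained, as well as the exact combination of VariableMemoryAssigner and GraphMemoryAssigner used in the real build pipeline, are assumptions.

// Hedged sketch, for illustration only; assumes ge::SUCCESS / ge::FAILED
// from ge_inner_error_codes.h and a caller that already holds a graph.
#include "graph/build/memory/graph_mem_assigner.h"

ge::Status AssignGraphMemory(const ge::ComputeGraphPtr &compute_graph) {
  // Variable memory has its own assigner, declared in this header.
  ge::VariableMemoryAssigner var_assigner(compute_graph);
  if (var_assigner.Assign() != ge::SUCCESS) {
    return ge::FAILED;
  }

  // Feature-map, workspace and atomic offsets go through GraphMemoryAssigner.
  ge::GraphMemoryAssigner graph_assigner(compute_graph);
  if (graph_assigner.AssignMemory() != ge::SUCCESS) {
    return ge::FAILED;
  }

  // Per the header comment, variable attributes are written back to the
  // nodes only after all memory has been assigned.
  return graph_assigner.AssignVarAttr2Nodes();
}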

The Graph Engine (GE) module is a submodule of MindSpore. It is implemented in C++ and sits between the frontend module ME and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE is optimized specifically for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.