
node_executor.h

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_HYBRID_NODE_EXECUTOR_NODE_EXECUTOR_H_
#define GE_HYBRID_NODE_EXECUTOR_NODE_EXECUTOR_H_

#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <string>

#include "external/ge/ge_api_error_codes.h"
#include "common/opskernel/ops_kernel_builder.h"
#include "graph/node.h"
#include "task_context.h"

namespace ge {
const uint32_t MEMORY_ALIGN_RATIO = 2;
const uint32_t MEMORY_ALIGN_SIZE = 32;

namespace hybrid {
class HybridModel;

// Base class of Node Task
class NodeTask {
 public:
  NodeTask() = default;
  virtual ~NodeTask() = default;

  /**
   * Update tiling data
   * @param context instance of TaskContext
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status UpdateTilingData(TaskContext &context) {
    return SUCCESS;
  }

  /**
   * Init
   * @param context instance of TaskContext
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status Init(TaskContext &context) {
    return SUCCESS;
  }

  /**
   * Whether this task supports dynamic shape
   * @return true if this task supports dynamic shape, false otherwise
   */
  virtual bool IsSupportDynamicShape() {
    return true;
  }

  /**
   * Update args for execution
   * @param context instance of TaskContext
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status UpdateArgs(TaskContext &context) = 0;

  /**
   * Execute task async
   * @param context instance of TaskContext
   * @param done_callback callback function, will be invoked after task is done
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status ExecuteAsync(TaskContext &context, std::function<void()> done_callback) = 0;
};

// Node executor
class NodeExecutor {
 public:
  NodeExecutor() = default;
  virtual ~NodeExecutor() = default;

  /**
   * Initialize node executor
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status Initialize() {
    return SUCCESS;
  }

  /**
   * Finalize node executor
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status Finalize() {
    return SUCCESS;
  }

  /**
   * Load task in load stage
   * @param model instance of HybridModel
   * @param node node
   * @param task generated node task
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status LoadTask(const HybridModel &model,
                          const NodePtr &node,
                          std::shared_ptr<NodeTask> &task) const;

  /**
   * Compile task in run stage
   * @param model instance of HybridModel
   * @param node node
   * @param task generated node task
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status CompileTask(const HybridModel &model,
                             const NodePtr &node,
                             std::shared_ptr<NodeTask> &task) const;

  /**
   * Preparation actions before execution
   * @param task instance of NodeTask
   * @param context instance of TaskContext
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status PrepareTask(NodeTask &task, TaskContext &context) const;

  /**
   * Execute task
   * @param task instance of NodeTask
   * @param context instance of TaskContext
   * @param callback callback function which will be invoked after computation is done
   * @return SUCCESS on success, error code otherwise
   */
  virtual Status ExecuteTask(NodeTask &task, TaskContext &context, const std::function<void()> &callback) const;
};

class NodeExecutorManager {
 public:
  enum class ExecutorType {
    AICORE,
    AICPU_TF,
    AICPU_CUSTOM,
    COMPILED_SUBGRAPH,
    DYNAMIC_SUBGRAPH,
    GE_LOCAL,
    CONTROL_OP,
    HCCL,
    RTS,
    HOST_CPU,
    RESERVED
  };

  static NodeExecutorManager &GetInstance() {
    static NodeExecutorManager instance;
    return instance;
  }
  /**
   * Register builder of executor
   * @param executor_type type of executor
   * @param builder builder function
   */
  void RegisterExecutorBuilder(ExecutorType executor_type, const std::function<NodeExecutor *()> &builder);
  /**
   * Initialize executor if needed
   * @return SUCCESS on success, error code otherwise
   */
  Status EnsureInitialized();

  Status InitializeExecutors();

  void FinalizeExecutors();

  /**
   * CalcOpRunningParam
   * @param node node
   * @return SUCCESS on success, error code otherwise
   */
  Status CalcOpRunningParam(Node &node) const;

  /**
   * Get executor by node
   * @param node node
   * @param executor executor
   * @return SUCCESS on success, error code otherwise
   */
  Status GetExecutor(Node &node, const NodeExecutor **executor) const;

  /**
   * Resolve executor type by node
   * @param node node
   * @return executor type
   */
  ExecutorType ResolveExecutorType(Node &node) const;

 private:
  std::map<ExecutorType, std::unique_ptr<NodeExecutor>> executors_;
  std::map<ExecutorType, std::function<NodeExecutor *()>> builders_;
  std::map<std::string, NodeExecutorManager::ExecutorType> engine_mapping_;
  std::mutex mu_;
  bool initialized_ = false;
  bool executor_initialized_ = false;
  int ref_count_ = 0;
};

class NodeExecutorRegistrar {
 public:
  NodeExecutorRegistrar(NodeExecutorManager::ExecutorType executor_type,
                        NodeExecutor *(*builder)());
  ~NodeExecutorRegistrar() = default;
};
}  // namespace hybrid
}  // namespace ge

#define REGISTER_NODE_EXECUTOR_BUILDER(engine_type, executor) \
  REGISTER_NODE_EXECUTOR_BUILDER_UNIQ_HELPER(__COUNTER__, engine_type, executor)

#define REGISTER_NODE_EXECUTOR_BUILDER_UNIQ_HELPER(ctr, engine_type, executor) \
  REGISTER_NODE_EXECUTOR_BUILDER_UNIQ(ctr, engine_type, executor)

#define REGISTER_NODE_EXECUTOR_BUILDER_UNIQ(ctr, engine_type, executor)                        \
  static ::ge::hybrid::NodeExecutorRegistrar register_##ctr __attribute__((unused)) =         \
      ::ge::hybrid::NodeExecutorRegistrar(engine_type, []() -> ::ge::hybrid::NodeExecutor * { \
        return new (std::nothrow) executor();                                                  \
      })

#endif  // GE_HYBRID_NODE_EXECUTOR_NODE_EXECUTOR_H_
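
For orientation, the sketch below shows how a backend would typically plug into this interface: implement NodeTask and NodeExecutor, then register a builder through REGISTER_NODE_EXECUTOR_BUILDER. This is a minimal sketch, not part of the GraphEngine sources; the names SampleNodeTask and SampleNodeExecutor, the include path, and the choice of ExecutorType::RESERVED are illustrative assumptions, while the base classes, the enum, and the macro come from node_executor.h above.

// sample_node_executor.cc -- illustrative sketch only, not part of GraphEngine.
#include "hybrid/node_executor/node_executor.h"  // path assumed from the include guard above

namespace ge {
namespace hybrid {
// Hypothetical task: refreshes its arguments, runs synchronously, then fires the callback.
class SampleNodeTask : public NodeTask {
 public:
  Status UpdateArgs(TaskContext &context) override {
    // Refresh input/output addresses in the kernel arguments before each run.
    return SUCCESS;
  }

  Status ExecuteAsync(TaskContext &context, std::function<void()> done_callback) override {
    // Launch the actual kernel here; signal completion through the callback.
    if (done_callback != nullptr) {
      done_callback();
    }
    return SUCCESS;
  }
};

// Hypothetical executor: hands out a SampleNodeTask for every node it is asked to load.
class SampleNodeExecutor : public NodeExecutor {
 public:
  Status LoadTask(const HybridModel &model, const NodePtr &node,
                  std::shared_ptr<NodeTask> &task) const override {
    task = std::make_shared<SampleNodeTask>();
    return task == nullptr ? FAILED : SUCCESS;
  }
};

// The macro expands to a static NodeExecutorRegistrar whose constructor hands the builder
// lambda to NodeExecutorManager; RESERVED is used here only as a placeholder slot.
REGISTER_NODE_EXECUTOR_BUILDER(NodeExecutorManager::ExecutorType::RESERVED, SampleNodeExecutor);
}  // namespace hybrid
}  // namespace ge

Judging from the declarations above, NodeExecutorManager::GetInstance() maps a node's engine to an ExecutorType (engine_mapping_, ResolveExecutorType), instantiates executors from the registered builders, and returns the matching one through GetExecutor(); the caller then drives PrepareTask() and ExecuteTask() with the task produced by LoadTask() or CompileTask().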

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.