You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

graph_manager.h 16 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_GRAPH_MANAGER_GRAPH_MANAGER_H_
  17. #define GE_GRAPH_MANAGER_GRAPH_MANAGER_H_
  18. #include <iostream>
  19. #include <map>
  20. #include <memory>
  21. #include <set>
  22. #include <string>
  23. #include <thread>
  24. #include <vector>
  25. #include "common/blocking_queue.h"
  26. #include "common/ge_inner_error_codes.h"
  27. #include "common/helper/model_cache_helper.h"
  28. #include "external/graph/types.h"
  29. #include "ge/ge_api_types.h"
  30. #include "graph/build/graph_builder.h"
  31. #include "graph/execute/graph_execute.h"
  32. #include "graph/ge_local_context.h"
  33. #include "graph/load/graph_loader.h"
  34. #include "graph/manager/graph_manager_utils.h"
  35. #include "graph/manager/util/variable_accelerate_ctrl.h"
  36. #include "graph/optimize/graph_optimize.h"
  37. #include "graph/partition/graph_partition.h"
  38. #include "graph/preprocess/graph_preprocess.h"
  39. #include "graph/tuning_utils.h"
  40. #include "model/ge_model.h"
  41. namespace ge {
  42. class GraphManager {
  43. public:
  44. GraphManager();
  45. ~GraphManager() = default;
  46. ///
  47. /// @ingroup ge_graph
  48. /// @brief graph manager init
  49. /// @param [in] options user config params
  50. /// @return Status result of function
  51. ///
  52. Status Initialize(const std::map<string, string> &options);
  53. ///
  54. /// @ingroup ge_graph
  55. /// @brief graph manager finalize
  56. /// @return Status result of function
  57. ///
  58. Status Finalize();
  59. ///
  60. /// @ingroup ge_graph
  61. /// @brief add specific graph
  62. /// @param [in] graph_id graph id
  63. /// @param [out] Graph output graph
  64. /// @return Status result of function
  65. ///
  66. Status AddGraph(const GraphId &graph_id, const Graph &graph, const std::map<std::string, std::string> &options,
  67. const OmgContext &omg_context);
  68. ///
  69. /// @ingroup ge_graph
  70. /// @brief remove specific graph
  71. /// @param [in] graph_id graph id
  72. /// @return Status result of function
  73. ///
  74. Status RemoveGraph(const GraphId &graph_id);
  75. ///
  76. /// @ingroup ge_graph
  77. /// @brief run specific graph
  78. /// @param [in] graph_id graph id
  79. /// @param [in] inputs input data
  80. /// @param [out] outputs output data
  81. /// @return Status result of function
  82. ///
  83. Status RunGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs, std::vector<GeTensor> &outputs,
  84. uint64_t session_id = INVALID_SESSION_ID);
  85. ///
  86. /// @ingroup ge_graph
  87. /// @brief build specific graph
  88. /// @param [in] graph_id graph id
  89. /// @param [in] inputs input data
  90. /// @param [out] models build result
  91. /// @return Status result of function
  92. ///
  93. ge::Status BuildGraph(const GraphId &graph_id, const std::vector<GeTensor> &inputs, GeRootModelPtr &models,
  94. uint64_t session_id = 0, bool async = false);
  95. Status BuildGraphForUnregisteredOp(const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  96. GeRootModelPtr &ge_root_model, uint64_t session_id);
  97. ///
  98. /// @ingroup ge_graph
  99. /// @brief Save extra attribute to Model
  100. /// @param [in] model: Model attribues will save to.
  101. /// @param [in] type: type of OpDesc.
  102. /// @param [in] attrs: attributes of OpDesc
  103. /// @param [in] inputs: input tensor
  104. /// @param [in] outputs: output tensor
  105. /// @return: Status
  106. ///
  107. Status SaveParams(ge::GeModel &model, const std::string &type, const std::map<string, GeAttrValue> &attrs,
  108. const std::vector<GeTensor> &inputs, const std::vector<GeTensor> &outputs);
  109. ///
  110. /// @ingroup ge_graph
  111. /// @brief get variable value from the session with specific session id
  112. /// @param [in] sessionId session id
  113. /// @param [in] name op name
  114. /// @param [out] val out value tensor
  115. /// @return Status result of function
  116. ///
  117. Status GetVariable(const std::string &name, Tensor &val);
  118. ///
  119. /// @ingroup ge_graph
  120. /// @brief run graph async on session with specific session id
  121. /// @param [in] graph_id graph id
  122. /// @param [in] inputs input data
  123. /// @param [out] callback: callback while run graph async finish
  124. /// @return Status result of function
  125. ///
  126. Status RunGraphAsync(const GraphId &graph_id, const std::vector<ge::InputTensorInfo> &inputs,
  127. uint64_t session_id, RunAsyncCallback callback);
  128. ///
  129. /// @ingroup ge_graph
  130. /// @brief me register the callback function to get the result of summary or checkpoin
  131. /// @param [in] key: summary or checkpoint
  132. /// @param [in] callbak: The real callback object of me
  133. /// @return Status result of function
  134. ///
  135. Status RegisterCallBackFunc(
  136. const std::string &key,
  137. const std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)> &callback);
  138. Status RegisterCallBackFunc(
  139. const std::string &key,
  140. const std::function<Status(uint32_t, const std::map<ge::AscendString, ge::Tensor> &)> &callback);
  141. const bool GetTrainFlag() const { return options_.train_graph_flag; }
  142. bool IsGraphNeedRebuild(uint32_t graph_id);
  143. Status GenerateInfershapeGraph(GraphId &graph_id);
  144. const std::map<std::string, std::string> *GetGraphOptions(uint32_t graph_id);
  145. void SetOptionsRunGraphFlag(bool run_graph_flag);
  146. Status GenCheckPointGraph(const std::map<std::string, GeTensorDesc> &all_variables, Graph &graph);
  147. Status SaveVariables(const Graph &graph, const std::vector<std::string> &var_names,
  148. const std::vector<Tensor> &outputs, std::vector<Tensor> &var_values);
  149. Status SaveCheckPointResult(const Graph &graph, const std::vector<Tensor> &outputs, map<string, Tensor> &var_results);
  150. private:
  151. struct CompilerStages {
  152. GraphPrepare preparer;
  153. GraphOptimize optimizer;
  154. GraphPartitioner partitioner;
  155. GraphBuilder builder;
  156. };
  157. struct PreRunArgs {
  158. GraphId graph_id;
  159. std::vector<ge::InputTensorInfo> input_tensor;
  160. uint64_t session_id;
  161. GEThreadLocalContext context;
  162. RunAsyncCallback callback;
  163. };
  164. struct RunArgs {
  165. GraphNodePtr graph_node;
  166. GraphId graph_id;
  167. uint64_t session_id;
  168. std::vector<ge::InputTensorInfo> input_tensor;
  169. GeRootModelPtr ge_root_model;
  170. GEThreadLocalContext context;
  171. RunAsyncCallback callback;
  172. };
  173. void AddGraphNode(GraphId graph_id, const GraphNodePtr &graph_node);
  174. void RemoveGraphNode(GraphId graph_id);
  175. bool HasGraphNode(GraphId graph_id);
  176. Status GetGraphNode(const GraphId &graph_id, GraphNodePtr &out);
  177. std::shared_ptr<GraphModelListener> GetModelListener() const { return graph_run_listener_; }
  178. static Status ProcessSubGraphWithMultiThreads(GraphManager *graph_manager, GraphId root_graph_id,
  179. const SubGraphInfoPtr &sub_graph_info_ptr, uint64_t session_id,
  180. const GEThreadLocalContext &ge_context);
  181. Status PreRun(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs, GeRootModelPtr &ge_root_model,
  182. uint64_t session_id = INVALID_SESSION_ID);
  183. Status OptimizeSubgraph(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph, uint64_t session_id);
  184. Status Build(const GraphNodePtr &graph_node, ComputeGraphPtr &compute_graph,
  185. GeRootModelPtr &ge_root_model, uint64_t session_id);
  186. Status StartForRunGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  187. GeRootModelPtr &ge_root_model, uint64_t session_id = INVALID_SESSION_ID);
  188. Status InnerRunGraph(GraphNodePtr &graph_node, const GraphId &graph_id, const std::vector<GeTensor> &inputs,
  189. std::vector<GeTensor> &outputs);
  190. Status ParseOptions(const std::map<std::string, std::string> &options);
  191. static void ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  192. std::string &option);
  193. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key, bool &option);
  194. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key, int &option);
  195. static Status ParseOption(const std::map<std::string, std::string> &options, const std::string &key,
  196. std::map<std::string, int> &option);
  197. static void Trim(std::string &str);
  198. static Status CheckEngineName(const std::string &engine_name, const std::string &key,
  199. const std::map<std::string, int> &option);
  200. static Status ParseParallelNum(const std::string &parallel_num, const std::string &key, int &num);
  201. static Status ParseTrainGraphFlag(bool &options, bool &option);
  202. static bool IsPerfLevelInvalid(int32_t perf_level);
  203. Status SummaryHandle(const GraphId &graph_id, std::vector<GeTensor> &outputs);
  204. Status CheckpointHandle(const GraphId &graph_id, const ComputeGraphPtr &compute_graph,
  205. const std::vector<GeTensor> &outputs);
  206. // call the callback function of ME to push summary result data to ME
  207. Status PushSummaryData2ME(const GraphId &graph_id, const std::map<std::string, ge::Tensor> &summary_data);
  208. // call the callback function of ME to push save result data to ME
  209. Status PushSaveData2ME(const GraphId &graph_id, const std::map<std::string, ge::Tensor> &save_data);
  210. bool IsCheckpointGraph(ComputeGraphPtr &compute_graph);
  211. bool CheckNetOutputForCheckpointGraph(NodePtr &node);
  212. bool CheckVariableForCheckpointGraph(NodePtr &node);
  213. bool CheckTransOpForCheckpointGraph(NodePtr &node);
  214. Status MergeSubGraph(ComputeGraphPtr &compute_graph, const ge::ComputeGraphPtr &original_compute_graph,
  215. GraphId root_graph_id);
  216. Status ConvertGraphToFile(ComputeGraphPtr &compute_graph, GraphPartitioner &partitioner, std::string file_path,
  217. bool exe_flag = false);
  218. Status SetSubgraph(uint64_t session_id, ComputeGraphPtr compute_graph, GraphPartitioner &partitioner);
  219. void SetAttrForHcomBroadCastOp(ge::ComputeGraphPtr &compute_graph);
  220. bool IsBroadCastOpData(const ge::NodePtr &var_node);
  221. void AdjustBroadCastOpData(const ge::NodePtr &var_node);
  222. bool IsAssignOpData(const ge::NodePtr &var_node);
  223. void AdjustAssignOpData(const ge::NodePtr &var_node);
  224. bool ConfirmUseOpAndIndexByAnchor(const ge::InDataAnchorPtr &in_anchor, const map<string, std::set<int>> &confirm_ops,
  225. ge::NodePtr &use_node);
  226. bool ConfirmUseOpAndIndexByNode(const ge::NodePtr &var_node, const map<string, std::set<int>> &confirm_ops,
  227. ge::NodePtr &use_node);
  228. // graph context
  229. std::shared_ptr<GraphContext> GetGraphContext() const { return graph_context_; }
  230. Status RemoveIsolatedConst(ge::ComputeGraphPtr &compute_graph);
  231. Status RemoveIsolatedConstInThisGraph(ge::ComputeGraphPtr &compute_graph);
  232. Status OptimizeStage1(ComputeGraphPtr &compute_graph);
  233. Status OptimizeStage2(ComputeGraphPtr &compute_graph);
  234. Status SubexpressionMigration(ComputeGraphPtr &compute_graph);
  235. Status LoadGraphAsync(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node);
  236. Status CheckAndReleaseMemory(const GeModelPtr &ge_model, const GraphNodePtr &graph_node);
  237. bool CheckModelLoad(const GeRootModelPtr &ge_model, bool load_flag);
  238. Status LoadGraph(const GeRootModelPtr &ge_root_model, const GraphNodePtr &graph_node);
  239. bool IsGraphNeedBuild(const GraphNodePtr &graph_node);
  240. Status LoadFromCache(const GraphNodePtr &graph_node, const ModelCacheHelperPtr &cache_helper, GeModelPtr &ge_model);
  241. Status SaveCacheBeforeBuild(uint32_t graph_id, const ModelCacheHelperPtr &cache_helper);
  242. Status SaveCacheAfterBuild(uint32_t graph_id, ComputeGraphPtr graph, GeModelPtr &ge_model);
  243. void AddModelCacheHelperToMap(const GraphId &graph_id, uint64_t session_id, ComputeGraphPtr &compute_graph);
  244. Status IncreBuild(const GraphNodePtr &graph_node, GeModelPtr &ge_model);
  245. void RemoveModelCacheHelper(const GraphId &graph_id);
  246. ModelCacheHelperPtr FindModelCacheHelper(GraphId graph_id);
  247. static void ConstructGeInput(std::vector<ge::GeTensor> &ge_inputs, PreRunArgs &args);
  248. static void PreRunThread(GraphManager *graph_manager);
  249. static void RunThread(GraphManager *graph_manager);
  250. static void StopQueue(GraphManager *graph_manager);
  251. static void ReturnError(GraphManager *graph_manager, RunAsyncCallback callback, Status ret, const string &log);
  252. static void ReturnError(GraphManager *graph_manager, GraphNodePtr &graph_node, RunAsyncCallback callback,
  253. Status ret, const string &log);
  254. void ChangeConstTypeWhenTraining(const ComputeGraphPtr &compute_graph);
  255. Status PreRunOptimizeOriginalGraph(const GraphNodePtr &graph_node, const std::vector<GeTensor> &inputs,
  256. ge::ComputeGraphPtr &compute_graph, uint64_t session_id);
  257. Status PreRunOptimizeSubGraph(const GraphNodePtr &graph_node,
  258. ge::ComputeGraphPtr &compute_graph,
  259. uint64_t session_id);
  260. Status PreRunAfterOptimizeSubGraph(const GraphNodePtr &graph_node,
  261. ComputeGraphPtr &compute_graph,
  262. GeRootModelPtr &ge_root_model,
  263. uint64_t session_id);
  264. Status CopySubGraphAndMarkFusion(const ComputeGraphPtr &compute_graph,
  265. Graph2SubGraphInfoList &sub_graph_map,
  266. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs);
  267. Status OptimizeSubGraphWithMultiThreads(ComputeGraphPtr compute_graph,
  268. Graph2SubGraphInfoList &sub_graph_map,
  269. uint64_t session_id);
  270. bool CheckAllFusionOptimizeSuccess(const ComputeGraphPtr &compute_graph, Graph2SubGraphInfoList &sub_graph_map);
  271. Status ReplaceSubgraphWithOriGraph(const ComputeGraphPtr &compute_graph,
  272. Graph2SubGraphInfoList &sub_graph_map,
  273. std::unordered_map<std::string, ComputeGraphPtr> &copy_graphs);
  274. Status SetRtContext(rtContext_t rt_context, rtCtxMode_t mode, uint64_t session_id, uint32_t graph_id);
  275. void AddLocalOmgContext(GraphId graph_id, const OmgContext &omg_context);
  276. void UpdateLocalOmgContext(GraphId graph_id);
  277. CompilerStages &GetCompilerStages(GraphId graph_id);
  278. void RemoveCompilerStages(GraphId graph_id);
  279. std::atomic_bool thread_run_flag_;
  280. BlockingQueue<PreRunArgs> prerun_args_q_{};
  281. BlockingQueue<RunArgs> run_args_q_{};
  282. std::thread prerun_thread_;
  283. std::thread run_thread_;
  284. std::map<GraphId, GraphNodePtr> graph_map_;
  285. std::map<GraphId, ModelCacheHelperPtr> cache_helper_map_;
  286. // for run graph synchronous return
  287. std::mutex sync_run_mutex_;
  288. std::condition_variable condition_;
  289. // run graph synchronization call back listener
  290. std::shared_ptr<GraphModelListener> graph_run_listener_;
  291. // summary and checkpoint callback function list for ME, key is summary or checkpoint
  292. std::map<std::string, std::function<Status(uint32_t, const std::map<std::string, ge::Tensor> &)>> me_callback_map_;
  293. std::map<std::string, std::function<Status(uint32_t, const std::map<ge::AscendString, ge::Tensor> &)>> callback_map_;
  294. bool init_flag_;
  295. GraphManagerOptions options_;
  296. GraphContextPtr graph_context_ = nullptr;
  297. map<GraphId, OmgContext> omg_contexts_;
  298. map<GraphId, CompilerStages> compiler_stages_;
  299. GraphExecutor graph_executor_;
  300. VarAccelerateCtrl var_acc_ctrl_;
  301. std::mutex run_mutex_;
  302. std::mutex member_mutex_;
  303. std::mutex unload_model_mutex_;
  304. };
  305. } // namespace ge
  306. #endif // GE_GRAPH_MANAGER_GRAPH_MANAGER_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示