
graph_optimize.h

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_GRAPH_OPTIMIZE_GRAPH_OPTIMIZE_H_
#define GE_GRAPH_OPTIMIZE_GRAPH_OPTIMIZE_H_

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

#include "common/ge_inner_error_codes.h"
#include "common/ge_types.h"
#include "common/optimizer/graph_optimizer.h"
#include "graph/compute_graph.h"
#include "graph/manager/graph_context.h"
#include "graph/manager/graph_manager_utils.h"
#include "omg/omg_inner_types.h"

namespace ge {
using ComputeGraphPtr = std::shared_ptr<ge::ComputeGraph>;
using GraphOptimizerPtr = std::shared_ptr<ge::GraphOptimizer>;

class GraphOptimize {
 public:
  GraphOptimize();
  ~GraphOptimize() = default;

  // subgraph optimize
  Status OptimizeSubGraph(ComputeGraphPtr &compute_graph, const std::string &engine_name);

  // original graph optimize
  Status OptimizeOriginalGraph(ComputeGraphPtr &compute_graph);

  Status OptimizeOriginalGraphJudgeInsert(ComputeGraphPtr &compute_graph);

  // for fe prepare optimize in quantize scene
  Status OptimizeOriginalGraphForQuantize(ComputeGraphPtr &compute_graph);

  // for engine to optimize merged whole graph before ge Optimize2
  Status OptimizeWholeGraph(ComputeGraphPtr &compute_graph);

  // for rts optimize before build to add attr and insert memcpy op
  Status OptimizeGraphBeforeBuildForRts(ComputeGraphPtr &compute_graph);

  // set options
  Status SetOptions(const GraphManagerOptions &options);

  const std::map<uint32_t, std::map<string, size_t>> &GetSummaryOutputIndexes() const {
    return summary_output_indexes_;
  }  // lint !e1073

  // handle summary node before preRun graph
  Status HandleSummaryOp(ComputeGraphPtr &compute_graph);

  // Identify reference node before optimize subgraph
  Status IdentifyReference(ComputeGraphPtr &compute_graph);

  Status HandleMemoryRWConflict(ComputeGraphPtr &compute_graph);
  Status CheckRWConflict(ComputeGraphPtr &compute_graph, bool &has_conflict);
  void TranFrameOp(ComputeGraphPtr &compute_graph);

 private:
  std::mutex mutex_;
  domi::FrameworkType optimize_type_;
  std::string cal_config_;
  std::string insert_op_config_;
  std::string core_type_;
  bool train_graph_flag_ = false;
  bool local_fmk_op_flag_ = false;
  // record the summary names for filtering summary results.
  std::map<uint32_t, std::map<string, size_t>> summary_output_indexes_ = {};
  std::string func_bin_path_;
  std::string build_mode_;
  std::string build_step_;
};
}  // namespace ge

#endif  // GE_GRAPH_OPTIMIZE_GRAPH_OPTIMIZE_H_
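
For orientation, the sketch below shows one way the public interface declared above could be driven: configure the optimizer with the manager options, run the original-graph pass, then query the recorded summary output indexes. This is a minimal illustration, not code from the repository; the helper function name, the include path (inferred from the header guard), and the assumption that this is compiled inside the GE source tree with ge::SUCCESS available are all assumptions.

// Minimal usage sketch (assumption: built inside the GE tree; include path
// inferred from the header guard GE_GRAPH_OPTIMIZE_GRAPH_OPTIMIZE_H_).
#include "graph/optimize/graph_optimize.h"

ge::Status RunOriginalGraphOptimize(ge::ComputeGraphPtr &compute_graph,
                                    const ge::GraphManagerOptions &options) {
  ge::GraphOptimize optimizer;

  // Hand the manager-level options (train flag, core type, build mode, ...)
  // to the optimizer before running any pass.
  ge::Status ret = optimizer.SetOptions(options);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // Optimize the original graph as delivered by the front end.
  ret = optimizer.OptimizeOriginalGraph(compute_graph);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // Summary output indexes recorded during optimization can then be queried,
  // e.g. to filter summary results later in the pipeline.
  const auto &summary_indexes = optimizer.GetSummaryOutputIndexes();
  (void)summary_indexes;
  return ge::SUCCESS;
}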

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them: GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the target hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.