
task_info.h

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_TASK_INFO_H_
#define GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_TASK_INFO_H_

#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <vector>

#include "cce/customize.h"
#include "framework/common/taskdown_common.h"
#include "framework/common/ge_inner_error_codes.h"
#include "graph/load/model_manager/ts_mem_mall.h"
#include "graph/load/model_manager/task_info/task_info_factory.h"
#include "proto/task.pb.h"

namespace ge {
// Describes one extra memory block attached to a model: size, logic/physical base, type and key.
struct MemInfo {
  size_t memory_size = 0;
  uint64_t logic_memory_base = 0;
  uint8_t *memory_base = nullptr;
  uint32_t memory_type = RT_MEMORY_HBM;
  std::string memory_key = "";
};

// Runtime resources and memory layout shared by all tasks of a loaded model.
struct RuntimeParam {
  RuntimeParam() {
    ts_mem_mall = std::unique_ptr<TsMemMall>(new (std::nothrow) TsMemMall());
    aicpu_mem_mall = std::unique_ptr<TsMemMall>(new (std::nothrow) TsMemMall(RT_MEMORY_HBM));
  }
  ~RuntimeParam() = default;

  std::string ToString() {
    std::stringstream ss;
    ss << "session_id:" << session_id << ", stream_num:" << stream_num << ", event_num:" << event_num
       << ", label_num:" << label_num << ", logic_mem_base:" << logic_mem_base
       << ", logic_weight_base:" << logic_weight_base << ", logic_var_base:" << logic_var_base
       << ", memory_size:" << mem_size << ", weight_size:" << weight_size << ", var_size:" << var_size
       << ", ex_memory_info:";
    for (const auto &it : memory_infos) {
      ss << "[memory_type:" << it.first << ", memory_size:" << it.second.memory_size << "]";
    }
    return ss.str();
  }

  uint64_t mem_size = 0;
  uint64_t logic_mem_base = 0;
  uint8_t *mem_base = nullptr;
  uint64_t weight_size = 0;
  uint64_t logic_weight_base = 0;
  uint8_t *weight_base = nullptr;
  uint64_t var_size = 0;
  uint64_t logic_var_base = 0;
  uint8_t *var_base = nullptr;
  std::map<uint64_t, MemInfo> memory_infos;
  uint32_t batch_num = 0;
  uint32_t stream_num = 0;
  uint32_t event_num = 0;
  uint32_t label_num = 0;
  uint64_t session_id = 0;
  uint32_t graph_id = 0;
  bool is_single_op = false;
  std::unique_ptr<TsMemMall> ts_mem_mall;
  std::unique_ptr<TsMemMall> aicpu_mem_mall;
};

// Records the original op names folded into a fused op, plus its index and stream.
typedef struct FusionOpInfo {
  std::vector<std::string> original_op_names;
  std::string op_name;
  uint32_t op_index;
  uint32_t stream_id;
} FusionOpInfo;

class DavinciModel;

// Base class for every task type generated at model-load time.
// Concrete tasks parse a domi::TaskDef in Init() and submit work to the runtime in Distribute().
class TaskInfo {
 public:
  TaskInfo() : stream_(nullptr) {}
  virtual ~TaskInfo() { stream_ = nullptr; }

  virtual Status Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) = 0;
  virtual Status Distribute() = 0;
  virtual Status UpdateArgs() { return SUCCESS; }
  virtual Status CalculateArgs(const domi::TaskDef &task_def, DavinciModel *davinci_model) { return SUCCESS; }
  virtual Status Release() { return SUCCESS; }
  virtual ccOpContext *GetCtx() { return nullptr; }
  virtual uint32_t GetTaskID() { return 0xFFFFFFFF; }
  virtual bool CallSaveDumpInfo() { return false; }
  virtual uint32_t GetStreamId() { return 0xFFFFFFFF; }
  virtual uintptr_t GetDumpArgs() { return 0; }
  virtual uint32_t GetSktTaskID() { return 0xFFFFFFFF; }
  virtual FusionOpInfo *GetFusionOpInfo() { return nullptr; }

 protected:
  Status SetStream(uint32_t stream_id, const std::vector<rtStream_t> &stream_list);

  void *stream_;
};
}  // namespace ge
#endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_TASK_INFO_TASK_INFO_H_
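
For orientation, the sketch below shows how this interface is meant to be specialised: a concrete task parses its domi::TaskDef in Init(), binds its execution stream via SetStream(), and submits the prepared work in Distribute(). The class name NoOpTaskInfo and the comments are illustrative assumptions based only on the declarations in this header, not code from the repository; real tasks additionally register themselves with the task factory, which is not shown here.

// Hypothetical example only: NoOpTaskInfo is not part of the code base.
#include "graph/load/model_manager/task_info/task_info.h"

namespace ge {
class NoOpTaskInfo : public TaskInfo {
 public:
  Status Init(const domi::TaskDef &task_def, DavinciModel *davinci_model) override {
    // A real task would read its parameters from task_def here (kernel args,
    // stream id, ...) and typically bind the stream with
    // SetStream(task_def.stream_id(), <model's stream list>).
    (void)task_def;
    (void)davinci_model;
    return SUCCESS;
  }

  Status Distribute() override {
    // A real task would submit the prepared work to the runtime on stream_ here.
    return SUCCESS;
  }

  Status Release() override {
    // Free any resources acquired in Init(); nothing to do for a no-op task.
    return SUCCESS;
  }
};
}  // namespace ge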

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them: GE takes the graph issued by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the target hardware. GE also performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
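
To give a rough sense of how GE is driven from the API side, the fragment below sketches a typical client flow against the public GE API (GEInitialize, Session::AddGraph, Session::RunGraph, GEFinalize). The option map, graph id and graph construction are placeholders; treat the whole fragment as an assumption about usage rather than a verified example.

// Illustrative sketch of the GE client flow; options and tensors are placeholders.
#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

int RunGraphOnce(const ge::Graph &graph) {
  std::map<std::string, std::string> options;  // device/soc options would go here
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  ge::Session session(options);
  const uint32_t graph_id = 1;                 // arbitrary id chosen by the caller
  if (session.AddGraph(graph_id, graph) != ge::SUCCESS) {  // hand the ME-built graph to GE
    ge::GEFinalize();
    return -1;
  }

  std::vector<ge::Tensor> inputs;   // filled by the caller in a real run
  std::vector<ge::Tensor> outputs;
  ge::Status ret = session.RunGraph(graph_id, inputs, outputs);  // optimize and execute

  ge::GEFinalize();
  return (ret == ge::SUCCESS) ? 0 : -1;
}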