You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

davinci_model.h 4.6 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_
  17. #define INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_
  18. #include <memory>
  19. #include <vector>
  20. #include "ge_runtime/op_info.h"
  21. #include "ge_runtime/task_info.h"
  22. namespace ge {
  23. namespace model_runner {
  24. class DavinciModel {
  25. public:
  26. DavinciModel(const std::vector<std::shared_ptr<TaskInfo>> &task_info_list,
  27. const std::vector<std::shared_ptr<OpInfo>> &data_info_list,
  28. const std::vector<std::shared_ptr<OpInfo>> &output_info_list,
  29. const std::vector<std::shared_ptr<OpInfo>> &constant_info_list,
  30. const std::vector<model_runner::OpInfoPtr> &variable_info_list,
  31. const std::vector<uint32_t> &wait_active_stream_list,
  32. const std::vector<uint32_t> &force_copy_stream_list, uint64_t mem_size = 0, uint64_t weight_size = 0,
  33. uint64_t var_size = 0, uintptr_t logic_mem_base = 0, uintptr_t logic_weight_base = 0,
  34. uintptr_t logic_var_base = 0, uint32_t stream_num = 0, uint32_t batch_num = 0, uint32_t event_num = 0,
  35. int32_t priority = 0)
  36. : task_info_list_(task_info_list),
  37. data_info_list_(data_info_list),
  38. output_info_list_(output_info_list),
  39. constant_info_list_(constant_info_list),
  40. variable_info_list_(variable_info_list),
  41. wait_active_stream_list_(wait_active_stream_list),
  42. force_copy_stream_list_(force_copy_stream_list),
  43. mem_size_(mem_size),
  44. weight_size_(weight_size),
  45. var_size_(var_size),
  46. logic_mem_base_(logic_mem_base),
  47. logic_weight_base_(logic_weight_base),
  48. logic_var_base_(logic_var_base),
  49. stream_num_(stream_num),
  50. batch_num_(batch_num),
  51. event_num_(event_num),
  52. priority_(priority) {}
  53. ~DavinciModel() {}
  54. uint64_t GetMemSize() const { return mem_size_; }
  55. uint64_t GetWeightSize() const { return weight_size_; }
  56. uint64_t GetVarSize() const { return var_size_; }
  57. uintptr_t GetLogicMemBase() const { return logic_mem_base_; }
  58. uintptr_t GetLogicWeightBase() const { return logic_weight_base_; }
  59. uintptr_t GetLogicVarBase() const { return logic_var_base_; }
  60. uint32_t GetStreamNum() const { return stream_num_; }
  61. uint32_t GetBatchNum() const { return batch_num_; }
  62. uint32_t GetEventNum() const { return event_num_; }
  63. const std::vector<uint32_t> &GetWaitActiveStreams() const { return wait_active_stream_list_; }
  64. const std::vector<uint32_t> &GetForceCopyStreams() const { return force_copy_stream_list_; }
  65. int32_t GetPriority() const { return priority_; }
  66. const std::vector<std::shared_ptr<TaskInfo>> &GetTaskInfoList() const { return task_info_list_; }
  67. const std::vector<std::shared_ptr<OpInfo>> &GetDataInfoList() const { return data_info_list_; }
  68. const std::vector<std::shared_ptr<OpInfo>> &GetOutputInfoList() const { return output_info_list_; }
  69. const std::vector<std::shared_ptr<OpInfo>> &GetConstantInfoList() const { return output_info_list_; }
  70. const std::vector<model_runner::OpInfoPtr> &GetVariableInfoList() const { return variable_info_list_; }
  71. private:
  72. std::vector<std::shared_ptr<TaskInfo>> task_info_list_;
  73. std::vector<std::shared_ptr<OpInfo>> data_info_list_;
  74. std::vector<std::shared_ptr<OpInfo>> output_info_list_;
  75. std::vector<std::shared_ptr<OpInfo>> constant_info_list_;
  76. std::vector<model_runner::OpInfoPtr> variable_info_list_;
  77. std::vector<uint32_t> wait_active_stream_list_;
  78. std::vector<uint32_t> force_copy_stream_list_;
  79. uint64_t mem_size_;
  80. uint64_t weight_size_;
  81. uint64_t var_size_;
  82. uintptr_t logic_mem_base_;
  83. uintptr_t logic_weight_base_;
  84. uintptr_t logic_var_base_;
  85. uint32_t stream_num_;
  86. uint32_t batch_num_;
  87. uint32_t event_num_;
  88. int32_t priority_;
  89. // Disable to copy constructor and assignment operator
  90. DavinciModel &operator=(const DavinciModel &) = delete;
  91. DavinciModel(const DavinciModel &) = delete;
  92. };
  93. } // namespace model_runner
  94. } // namespace ge
  95. #endif // INC_FRAMEWORK_GE_RUNTIME_DAVINCI_MODEL_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示