You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

model_runner.cc 5.6 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "framework/ge_runtime/model_runner.h"
  17. #include "ge_runtime/runtime_model.h"
  18. #include "framework/common/debug/ge_log.h"
  19. #include "framework/common/ge_inner_error_codes.h"
  20. #include "common/ge/ge_util.h"
  21. #include "framework/ge_runtime/davinci_model.h"
  22. #include "graph/op_desc.h"
  23. namespace ge {
  24. namespace model_runner {
  25. using RuntimeModelPtr = std::shared_ptr<RuntimeModel>;
  26. using DavinciModelPtr = std::shared_ptr<DavinciModel>;
  27. ModelRunner &ModelRunner::Instance() {
  28. static ModelRunner instance; // Guaranteed to be destroyed.
  29. return instance;
  30. }
  31. bool ModelRunner::LoadDavinciModel(uint32_t device_id, uint64_t session_id, uint32_t model_id,
  32. std::shared_ptr<DavinciModel> davinci_model,
  33. std::shared_ptr<ModelListener> listener) {
  34. std::shared_ptr<RuntimeModel> model = MakeShared<RuntimeModel>();
  35. if (model == nullptr) {
  36. return false;
  37. }
  38. bool status = model->Load(device_id, session_id, davinci_model);
  39. if (!status) {
  40. return false;
  41. }
  42. runtime_models_[model_id] = model;
  43. return true;
  44. }
  45. bool ModelRunner::DistributeTask(uint32_t model_id) {
  46. auto model_iter = runtime_models_.find(model_id);
  47. if (model_iter == runtime_models_.end()) {
  48. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  49. return false;
  50. }
  51. return model_iter->second->DistributeTask();
  52. }
  53. bool ModelRunner::LoadModelComplete(uint32_t model_id) {
  54. auto model_iter = runtime_models_.find(model_id);
  55. if (model_iter == runtime_models_.end()) {
  56. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  57. return false;
  58. }
  59. return model_iter->second->LoadComplete();
  60. }
  61. const std::vector<uint32_t> &ModelRunner::GetTaskIdList(uint32_t model_id) const {
  62. auto model_iter = runtime_models_.find(model_id);
  63. if (model_iter == runtime_models_.end()) {
  64. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  65. static const std::vector<uint32_t> empty_ret;
  66. return empty_ret;
  67. }
  68. return model_iter->second->GetTaskIdList();
  69. }
  70. const std::vector<uint32_t> &ModelRunner::GetStreamIdList(uint32_t model_id) const {
  71. auto model_iter = runtime_models_.find(model_id);
  72. if (model_iter == runtime_models_.end()) {
  73. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  74. static const std::vector<uint32_t> empty_ret;
  75. return empty_ret;
  76. }
  77. return model_iter->second->GetStreamIdList();
  78. }
  79. const std::map<std::string, std::shared_ptr<RuntimeInfo>> &ModelRunner::GetRuntimeInfoMap(uint32_t model_id) const {
  80. auto model_iter = runtime_models_.find(model_id);
  81. if (model_iter == runtime_models_.end()) {
  82. GELOGW("Model id %u not found.", model_id);
  83. static const std::map<std::string, std::shared_ptr<RuntimeInfo>> empty_ret;
  84. return empty_ret;
  85. }
  86. return model_iter->second->GetRuntimeInfoMap();
  87. }
  88. void *ModelRunner::GetModelHandle(uint32_t model_id) const {
  89. auto model_iter = runtime_models_.find(model_id);
  90. if (model_iter == runtime_models_.end()) {
  91. GELOGW("Model id %u not found.", model_id);
  92. return nullptr;
  93. }
  94. return model_iter->second->GetModelHandle();
  95. }
  96. bool ModelRunner::UnloadModel(uint32_t model_id) {
  97. auto iter = runtime_models_.find(model_id);
  98. if (iter != runtime_models_.end()) {
  99. (void)runtime_models_.erase(iter);
  100. return true;
  101. }
  102. return false;
  103. }
  104. bool ModelRunner::RunModel(uint32_t model_id, const InputData &input_data, OutputData *output_data) {
  105. if (output_data == nullptr) {
  106. GELOGW("Output data point is null.");
  107. }
  108. auto model_iter = runtime_models_.find(model_id);
  109. if (model_iter == runtime_models_.end()) {
  110. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  111. return false;
  112. }
  113. bool status = model_iter->second->CopyInputData(input_data);
  114. if (!status) {
  115. GELOGE(FAILED, "Copy input data fail.");
  116. return false;
  117. }
  118. status = model_iter->second->Run();
  119. if (!status) {
  120. GELOGE(FAILED, "Run model fail.");
  121. return false;
  122. }
  123. return true;
  124. }
  125. bool ModelRunner::GetInputOutputDescInfo(uint32_t model_id, bool zero_copy,
  126. std::vector<InputOutputDescInfo> *input_desc,
  127. std::vector<InputOutputDescInfo> *output_desc,
  128. std::vector<uint32_t> *input_format, std::vector<uint32_t> *output_format) {
  129. if (runtime_models_.find(model_id) == runtime_models_.end()) {
  130. GELOGE(PARAM_INVALID, "Model id %u not found.", model_id);
  131. return false;
  132. }
  133. auto model = runtime_models_[model_id];
  134. if (input_desc == nullptr || output_desc == nullptr) {
  135. GELOGE(PARAM_INVALID, "input_desc or output_desc is null.");
  136. return false;
  137. }
  138. bool status = model->GetInputOutputDescInfo(zero_copy, input_desc, output_desc, input_format, output_format);
  139. if (!status) {
  140. GELOGE(FAILED, "Get input output desc info fail.");
  141. return false;
  142. }
  143. return true;
  144. }
  145. } // namespace model_runner
  146. } // namespace ge

图引擎模块（GE）是MindSpore的一个子模块，其代码由C++实现，位于前端模块ME和底层硬件之间，起到承接作用。图引擎模块以ME下发的图作为输入，然后进行一系列的深度图优化操作，最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点，做了特定的优化工作，以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时，GE会被自动调用，用户并不感知。GE主要由GE API和GE Core两部分组成，详细的架构图如下所示。