You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; can include dashes ('-'); and can be up to 35 characters long.

aicpu_task.cc 6.6 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "ge_runtime/task/aicpu_task.h"
  17. #include <vector>
  18. #include "ge_runtime/task/task_factory.h"
  19. #include "aicpu/common/aicpu_task_struct.h"
  20. namespace ge {
  21. namespace model_runner {
  22. AicpuTask::AicpuTask(const ModelContext &model_context, const std::shared_ptr<AicpuTaskInfo> &task_info)
  23. : TaskRepeater<AicpuTaskInfo>(model_context, task_info),
  24. task_info_(task_info),
  25. stream_(nullptr),
  26. args_(nullptr),
  27. ext_info_(nullptr),
  28. input_output_addr_(nullptr) {
  29. if (task_info_ == nullptr) {
  30. GELOGW("task_info_ is null!");
  31. }
  32. auto stream_list = model_context.stream_list();
  33. if (stream_list.size() == 1) {
  34. stream_ = stream_list[0];
  35. } else if (stream_list.size() > task_info->stream_id()) {
  36. stream_ = stream_list[task_info->stream_id()];
  37. } else {
  38. GELOGW("index: %u >= stream_list.size(): %zu.", task_info->stream_id(), stream_list.size());
  39. }
  40. }
  41. AicpuTask::~AicpuTask() {
  42. ReleaseRtMem(&args_);
  43. ReleaseRtMem(&ext_info_);
  44. }
  45. bool AicpuTask::Distribute() {
  46. GELOGI("InitAicpuTask start.");
  47. vector<void *> io_addrs;
  48. io_addrs.insert(io_addrs.end(), task_info_->input_data_addrs().begin(), task_info_->input_data_addrs().end());
  49. io_addrs.insert(io_addrs.end(), task_info_->output_data_addrs().begin(), task_info_->output_data_addrs().end());
  50. auto io_addrs_num = static_cast<uint32_t>(io_addrs.size());
  51. auto io_addrs_size = static_cast<uint32_t>(io_addrs_num * sizeof(void *));
  52. constexpr uint32_t io_addr_offset = sizeof(aicpu::AicpuParamHead);
  53. uint32_t node_def_len_offset = io_addr_offset + io_addrs_size;
  54. uint32_t node_def_addr_offset = node_def_len_offset + sizeof(uint32_t);
  55. uint32_t args_size = sizeof(aicpu::AicpuParamHead) + io_addrs_size +
  56. static_cast<uint32_t>(task_info_->node_def().size()) + sizeof(uint32_t);
  57. aicpu::AicpuParamHead aicpu_param_head;
  58. aicpu_param_head.length = args_size;
  59. aicpu_param_head.ioAddrNum = io_addrs_num;
  60. auto ext_info = task_info_->ext_info();
  61. uint32_t ext_size = ext_info.size();
  62. if (ext_info.empty()) {
  63. aicpu_param_head.extInfoLength = 0;
  64. aicpu_param_head.extInfoAddr = 0;
  65. } else {
  66. rtError_t flag = rtMalloc(&ext_info_, ext_size, RT_MEMORY_HBM);
  67. if (flag != ACL_RT_SUCCESS) {
  68. GELOGE(RT_FAILED, "Call rt api(rtMalloc) failed, ret: 0x%X.", flag);
  69. return false;
  70. }
  71. flag = rtMemcpy(ext_info_, ext_size, const_cast<void *>(reinterpret_cast<const void *>(ext_info.data())), ext_size,
  72. RT_MEMCPY_HOST_TO_DEVICE);
  73. if (flag != ACL_RT_SUCCESS) {
  74. GELOGE(RT_FAILED, "Call rt api(rtMemCpy) failed, ret: 0x%X.", flag);
  75. return false;
  76. }
  77. GELOGI("ext info size:", ext_size);
  78. aicpu_param_head.extInfoLength = ext_size;
  79. aicpu_param_head.extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_);
  80. }
  81. // Malloc device memory for args
  82. rtError_t rt_ret = rtMalloc(&args_, args_size, RT_MEMORY_HBM);
  83. if (rt_ret != ACL_RT_SUCCESS) {
  84. GELOGE(RT_FAILED, "Call rt api(rtMalloc) failed, ret: 0x%X.", rt_ret);
  85. return false;
  86. }
  87. GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "task args data.", args_size)
  88. // Memcpy AicpuParamHead
  89. rt_ret = rtMemcpy(args_, sizeof(aicpu::AicpuParamHead), reinterpret_cast<void *>(&aicpu_param_head),
  90. sizeof(aicpu::AicpuParamHead), RT_MEMCPY_HOST_TO_DEVICE);
  91. if (rt_ret != ACL_RT_SUCCESS) {
  92. GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X.", rt_ret);
  93. return false;
  94. }
  95. // Memcpy io addrs
  96. if (io_addrs_num != 0) {
  97. rt_ret = rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(args_) + io_addr_offset), io_addrs_size,
  98. reinterpret_cast<void *>(io_addrs.data()), io_addrs_size, RT_MEMCPY_HOST_TO_DEVICE);
  99. if (rt_ret != ACL_RT_SUCCESS) {
  100. GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X.", rt_ret);
  101. return false;
  102. }
  103. }
  104. // Memcpy node def
  105. auto size = task_info_->node_def().size();
  106. rt_ret =
  107. rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(args_) + node_def_len_offset), sizeof(uint32_t),
  108. reinterpret_cast<const void *>(&size), sizeof(uint32_t), RT_MEMCPY_HOST_TO_DEVICE);
  109. if (rt_ret != ACL_RT_SUCCESS) {
  110. GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X.", rt_ret);
  111. return false;
  112. }
  113. // Memcpy node def
  114. rt_ret = rtMemcpy(reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(args_) + node_def_addr_offset),
  115. task_info_->node_def().size(), reinterpret_cast<const void *>(task_info_->node_def().data()),
  116. task_info_->node_def().size(), RT_MEMCPY_HOST_TO_DEVICE);
  117. if (rt_ret != ACL_RT_SUCCESS) {
  118. GELOGE(RT_FAILED, "Call rt api(rtMemcpy) failed, ret: 0x%X.", rt_ret);
  119. return false;
  120. }
  121. input_output_addr_ = reinterpret_cast<void *>(reinterpret_cast<uint8_t *>(args_) + io_addr_offset);
  122. auto dump_flag = task_info_->dump_flag() ? RT_KERNEL_DUMPFLAG : RT_KERNEL_DEFAULT;
  123. GELOGI(
  124. "Distribute AicpuTask start, args_size = %u, io_addrs_num = %u, so_name = %s, kernel_name = %s, dump_flag = %d.",
  125. args_size, io_addrs_num, task_info_->so_name().data(), task_info_->kernel_name().data(), dump_flag);
  126. rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(task_info_->so_name().data()),
  127. reinterpret_cast<const void *>(task_info_->kernel_name().data()), 1, args_,
  128. args_size, nullptr, stream_, dump_flag);
  129. if (rt_ret != ACL_RT_SUCCESS) {
  130. GELOGE(RT_FAILED, "Call rt api failed, ret: 0x%X", rt_ret);
  131. return false;
  132. }
  133. GELOGI("Distribute AicpuTask end.");
  134. return true;
  135. }
  136. void AicpuTask::ReleaseRtMem(void **ptr) noexcept {
  137. if (ptr == nullptr || *ptr == nullptr) {
  138. return;
  139. }
  140. rtError_t rt_ret = rtFree(*ptr);
  141. if (rt_ret != ACL_RT_SUCCESS) {
  142. GELOGE(RT_FAILED, "ReleaseRtMem failed, ret: 0x%X", rt_ret);
  143. return;
  144. }
  145. *ptr = nullptr;
  146. }
// Register this task implementation with the task factory so that
// AicpuTaskInfo entries in a model are dispatched to AicpuTask.
REGISTER_TASK(TaskInfoType::AICPU, AicpuTask, AicpuTaskInfo);
  148. } // namespace model_runner
  149. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示