
cpu_queue_schedule.cc 16 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/load/model_manager/cpu_queue_schedule.h"

#include "common/debug/ge_log.h"
#include "common/debug/log.h"

namespace {
const uint32_t kCoreDim = 1;  // for rtCpuKernelLaunch
const char *const kCpuTaskModelEnqueue = "modelEnqueue";
const char *const kCpuTaskWaitEndGraph = "modelWaitEndGraph";
const char *const kCpuTaskPrepareOutput = "bufferPrepareOutput";
const char *const kCpuTaskModelDequeue = "modelDequeue";
const char *const kCpuTaskModelRepeat = "modelRepeat";
const char *const kCpuTaskZeroCopy = "zeroCpy";
}  // namespace

namespace ge {
CpuTaskInfo::CpuTaskInfo(rtStream_t stream) : args_(nullptr), args_size_(0) { stream_ = stream; }

CpuTaskInfo::~CpuTaskInfo() {
  if (args_ == nullptr) {
    return;
  }

  rtError_t status = rtFree(args_);
  if (status != RT_ERROR_NONE) {
    GELOGW("Call rt free failed, status: 0x%x", status);
  }
  args_ = nullptr;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind input queue to task.
/// @param [in] queue_id: input queue id from user.
/// @param [out] in_mbuf: input mbuf addr for input data.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelDequeue::Init(uint32_t queue_id, uintptr_t &in_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(MbufQueueInfo) + sizeof(uintptr_t);  // sizeof(uintptr_t) to save in_mbuf.
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  in_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(MbufQueueInfo);
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  MbufQueueInfo queue_info;
  queue_info.queue_id = queue_id;
  queue_info.in_mbuf = in_mbuf;  // Placeholder, input mbuf addr will be saved to this place.

  status = rtMemcpy(args_, args_size_, &queue_info, sizeof(MbufQueueInfo), RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelDequeue::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelDequeue, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelDequeue failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch model dequeue task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, zero copy.
/// @param [in] mbuf_list: input/output mbuf addr list for input/output data.
/// @param [in] outside_addrs: model input/output memory addr
/// @return: 0 for success / others for failed
///
Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list, const map<uint32_t, ZeroCopyOffset> &outside_addrs) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(AddrMapInfo);
  GE_CHK_RT_RET(rtMalloc(&args_, args_size_, RT_MEMORY_HBM));
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  AddrMapInfo addr_map_info;
  // init src_addrs/dst_addrs
  vector<uint64_t> src_addrs;
  vector<uint64_t> dst_addrs;
  for (const auto &addrs : outside_addrs) {
    const auto &addrs_mapping_list = addrs.second.GetOutsideAddrs();
    GE_CHK_BOOL_EXEC(!addrs_mapping_list.empty(), return PARAM_INVALID, "not set outside_addrs");
    std::map<const void *, std::vector<void *>> virtual_args_addrs = addrs_mapping_list[0];
    for (const auto &virtual_args_addr : virtual_args_addrs) {
      addr_map_info.addr_num += virtual_args_addr.second.size();
      for (size_t i = 0; i < virtual_args_addr.second.size(); ++i) {
        src_addrs.emplace_back(mbuf_list.at(addrs.first));
        dst_addrs.push_back(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(virtual_args_addr.second.at(i))));
      }
    }
  }
  GELOGI("addr_map_info.addr_num is %u", addr_map_info.addr_num);

  // malloc mem for src_addrs/dst_addrs, and copy data of src_addrs/dst_addrs
  GE_CHK_RT_RET(rtMalloc(&src_addr_, src_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
  rtError_t status = rtMemcpy(src_addr_, src_addrs.size() * sizeof(uint64_t), src_addrs.data(),
                              src_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)

  GE_CHK_RT_RET(rtMalloc(&dst_addr_, dst_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
  status = rtMemcpy(dst_addr_, dst_addrs.size() * sizeof(uint64_t), dst_addrs.data(),
                    dst_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)

  // src_addr_list is initialized from src_addr_, which points to the device copy of src_addrs
  if (!src_addrs.empty() && !dst_addrs.empty()) {
    addr_map_info.src_addr_list = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(src_addr_));
    addr_map_info.dst_addr_list = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dst_addr_));
    GELOGI("src_addr_list is %lu, dst_addr_list is %lu", addr_map_info.src_addr_list, addr_map_info.dst_addr_list);
  }

  status = rtMemcpy(args_, args_size_, &addr_map_info, sizeof(AddrMapInfo), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)
  return SUCCESS;
}

Status CpuTaskZeroCopy::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskZeroCopy, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ZeroCopy failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch zero copy task success.");
  return SUCCESS;
}
CpuTaskZeroCopy::~CpuTaskZeroCopy() {
  if (src_addr_ == nullptr && dst_addr_ == nullptr) {
    return;
  }

  if (src_addr_ != nullptr) {
    rtError_t status = rtFree(src_addr_);
    if (status != RT_ERROR_NONE) {
      GELOGW("Call rt free failed, status: 0x%x", status);
    }
  }
  if (dst_addr_ != nullptr) {
    rtError_t status = rtFree(dst_addr_);
    if (status != RT_ERROR_NONE) {
      GELOGW("Call rt free failed, status: 0x%x", status);
    }
  }

  src_addr_ = nullptr;
  dst_addr_ = nullptr;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind output queue to task.
/// @param [in] addr: NetOutput Op input tensor address.
/// @param [in] size: NetOutput Op input tensor size.
/// @param [in] in_mbuf: input mbuf addr for input data.
/// @param [out] out_mbuf: output mbuf addr for output data.
/// @return: 0 for success / others for failed
///
Status CpuTaskPrepareOutput::Init(uintptr_t addr, uint32_t size, uintptr_t in_mbuf, uintptr_t &out_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(PrepareOutputInfo) + sizeof(uintptr_t);  // sizeof(uintptr_t) to save out_mbuf.
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  out_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(PrepareOutputInfo);
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  // Get NetOutput input address and bind it to the queue.
  PrepareOutputInfo prepare;
  prepare.data_size = size;
  prepare.data_addr = addr;
  prepare.in_mbuf = in_mbuf;
  prepare.out_mbuf = out_mbuf;  // Placeholder, output mbuf addr will be saved to this place.

  status = rtMemcpy(args_, args_size_, &prepare, sizeof(PrepareOutputInfo), RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskPrepareOutput::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskPrepareOutput, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch PrepareOutput failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch prepare output task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind output queue to task.
/// @param [in] queue_id: output queue id from user.
/// @param [in] out_mbuf: mbuf for output data.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelEnqueue::Init(uint32_t queue_id, uintptr_t out_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  // Bind the output mbuf to the user output queue.
  args_size_ = sizeof(MbufQueueInfo);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  MbufQueueInfo queue_info;
  queue_info.queue_id = queue_id;
  queue_info.in_mbuf = out_mbuf;

  status = rtMemcpy(args_, args_size_, &queue_info, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelEnqueue::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelEnqueue, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelEnqueue failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch model enqueue task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, activate entry stream.
/// @param [in] stream: stream to be activated.
/// @return: 0 for success / others for failed
///
Status CpuTaskActiveEntry::Init(rtStream_t stream) {
  if (stream == nullptr) {
    GELOGE(FAILED, "Task active stream not valid");
    return FAILED;
  }

  active_stream_ = stream;
  return SUCCESS;
}

Status CpuTaskActiveEntry::Distribute() {
  if ((active_stream_ == nullptr) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t ret = rtStreamActive(active_stream_, stream_);
  if (ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt StreamActive failed, ret: 0x%X", ret);
    return RT_ERROR_TO_GE_STATUS(ret);
  }

  GELOGI("Cpu kernel launch active entry task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, wait for end graph.
/// @param [in] model_id: model id for wait end graph.
/// @return: 0 for success / others for failed
///
Status CpuTaskWaitEndGraph::Init(uint32_t model_id) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(model_id);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskWaitEndGraph::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskWaitEndGraph, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch WaitEndGraph failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch wait end task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, repeat run model.
/// @param [in] model_id: model id for repeat run.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelRepeat::Init(uint32_t model_id) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(model_id);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelRepeat::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelRepeat, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelRepeat failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch repeat task success.");
  return SUCCESS;
}
}  // namespace ge
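
Every task class in this file follows the same two-step pattern: Init() stages the kernel arguments in device memory (RT_MEMORY_HBM), and Distribute() launches the corresponding AICPU kernel on the bound stream. The sketch below is not part of this file; it shows one plausible way a caller could chain the tasks for a single input/output queue pair. The ScheduleOneInput helper, the per-task rtStream_t constructors, and the queue/model ids are assumptions for illustration, using only the Init/Distribute signatures visible above.

// Hypothetical caller-side sketch (not part of cpu_queue_schedule.cc).
// Assumes each derived task exposes a rtStream_t constructor like CpuTaskInfo,
// and that queue ids, the NetOutput tensor addr/size, and model_id come from
// the model's deployment information.
Status ScheduleOneInput(rtStream_t stream, uint32_t in_queue_id, uint32_t out_queue_id,
                        uintptr_t output_addr, uint32_t output_size, uint32_t model_id) {
  uintptr_t in_mbuf = 0;
  uintptr_t out_mbuf = 0;

  CpuTaskModelDequeue dequeue_task(stream);   // bind user input queue, obtain in_mbuf slot
  CpuTaskPrepareOutput prepare_task(stream);  // prepare out_mbuf from the NetOutput tensor addr/size
  CpuTaskModelEnqueue enqueue_task(stream);   // bind out_mbuf to the user output queue
  CpuTaskWaitEndGraph wait_task(stream);      // wait for end-of-graph before re-arming
  CpuTaskModelRepeat repeat_task(stream);     // repeat the model for the next request

  if (dequeue_task.Init(in_queue_id, in_mbuf) != SUCCESS ||
      prepare_task.Init(output_addr, output_size, in_mbuf, out_mbuf) != SUCCESS ||
      enqueue_task.Init(out_queue_id, out_mbuf) != SUCCESS ||
      wait_task.Init(model_id) != SUCCESS ||
      repeat_task.Init(model_id) != SUCCESS) {
    return FAILED;
  }

  // Each Distribute() launches its AICPU kernel onto the same stream, so the
  // tasks execute in submission order for every inference request.
  if (dequeue_task.Distribute() != SUCCESS || prepare_task.Distribute() != SUCCESS ||
      enqueue_task.Distribute() != SUCCESS || wait_task.Distribute() != SUCCESS ||
      repeat_task.Distribute() != SUCCESS) {
    return FAILED;
  }
  return SUCCESS;
}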

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.