cpu_queue_schedule.cc 16 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "graph/load/new_model_manager/cpu_queue_schedule.h"
#include "common/debug/ge_log.h"
#include "common/debug/log.h"

namespace {
const uint32_t kCoreDim = 1;  // for rtCpuKernelLaunch
const char *const kCpuTaskModelEnqueue = "modelEnqueue";
const char *const kCpuTaskWaitEndGraph = "modelWaitEndGraph";
const char *const kCpuTaskPrepareOutput = "bufferPrepareOutput";
const char *const kCpuTaskModelDequeue = "modelDequeue";
const char *const kCpuTaskModelRepeat = "modelRepeat";
const char *const kCpuTaskZeroCopy = "zeroCpy";
}  // namespace

namespace ge {
CpuTaskInfo::CpuTaskInfo(rtStream_t stream) : args_(nullptr), args_size_(0) { stream_ = stream; }

CpuTaskInfo::~CpuTaskInfo() {
  if (args_ == nullptr) {
    return;
  }

  rtError_t status = rtFree(args_);
  if (status != RT_ERROR_NONE) {
    GELOGW("Call rt free failed, status: 0x%x", status);
  }
  args_ = nullptr;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind input queue to task.
/// @param [in] queue_id: input queue id from user.
/// @param [out] in_mbuf: input mbuf addr for input data.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelDequeue::Init(uint32_t queue_id, uintptr_t &in_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(MbufQueueInfo) + sizeof(uintptr_t);  // sizeof(uintptr_t) for save in_mbuf.
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  in_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(MbufQueueInfo);
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  MbufQueueInfo queue_info;
  queue_info.queue_id = queue_id;
  queue_info.in_mbuf = in_mbuf;  // Placeholder, input mbuf addr will save to this place.

  status = rtMemcpy(args_, args_size_, &queue_info, sizeof(MbufQueueInfo), RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelDequeue::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelDequeue, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelDequeue failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch model dequeue task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, zero copy.
/// @param [in] mbuf_list: input/output mbuf addr list for input/output data.
/// @param [in] outside_addrs: model input/output memory addr
/// @return: 0 for success / others for failed
///
Status CpuTaskZeroCopy::Init(std::vector<uintptr_t> &mbuf_list,
                             std::map<const void *, ZeroCopyOffset> &outside_addrs) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(AddrMapInfo);
  GE_CHK_RT_RET(rtMalloc(&args_, args_size_, RT_MEMORY_HBM));
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  AddrMapInfo addr_map_info;
  for (auto &addrs : outside_addrs) {
    auto &addrs_mapping_list = addrs.second.GetOutsideAddrs();
    GE_CHK_BOOL_EXEC(!addrs_mapping_list.empty(), return PARAM_INVALID, "not set outside_addrs");
    std::map<const void *, std::vector<void *>> virtual_args_addrs = addrs_mapping_list[0];
    for (const auto &virtual_args_addr : virtual_args_addrs) {
      addr_map_info.addr_num += virtual_args_addr.second.size();
    }
  }
  GELOGI("addr_map_info.addr_num is %u", addr_map_info.addr_num);

  // init src_addrs/dst_addrs
  size_t index = 0;
  vector<uint64_t> src_addrs;
  vector<uint64_t> dst_addrs;
  for (auto &addrs : outside_addrs) {
    auto &addrs_mapping_list = addrs.second.GetOutsideAddrs();
    GE_CHK_BOOL_EXEC(!addrs_mapping_list.empty(), return PARAM_INVALID, "not set outside_addrs");
    std::map<const void *, std::vector<void *>> virtual_args_addrs = addrs_mapping_list[0];
    for (const auto &virtual_args_addr : virtual_args_addrs) {
      for (size_t i = 0; i < virtual_args_addr.second.size(); ++i) {
        src_addrs.push_back(mbuf_list.at(index));
        dst_addrs.push_back(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(virtual_args_addr.second.at(i))));
      }
    }
    index++;
  }

  // malloc mem for src_addrs/dst_addrs, and copy data of src_addrs/dst_addrs
  GE_CHK_RT_RET(rtMalloc(&src_addr_, src_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
  rtError_t status = rtMemcpy(src_addr_, src_addrs.size() * sizeof(uint64_t), src_addrs.data(),
                              src_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)

  GE_CHK_RT_RET(rtMalloc(&dst_addr_, dst_addrs.size() * sizeof(uint64_t), RT_MEMORY_HBM));
  status = rtMemcpy(dst_addr_, dst_addrs.size() * sizeof(uint64_t), dst_addrs.data(),
                    dst_addrs.size() * sizeof(uint64_t), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)

  // src_addr_list is init to src_addr, which is the point to src_addrs
  if (!src_addrs.empty() && !dst_addrs.empty()) {
    addr_map_info.src_addr_list = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(src_addr_));
    addr_map_info.dst_addr_list = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(dst_addr_));
    GELOGI("src_addr_list is %lu, dst_addr_list is %lu", addr_map_info.src_addr_list, addr_map_info.dst_addr_list);
  }

  status = rtMemcpy(args_, args_size_, &addr_map_info, sizeof(AddrMapInfo), RT_MEMCPY_HOST_TO_DEVICE);
  GE_IF_BOOL_EXEC(status != RT_ERROR_NONE, GELOGE(RT_FAILED, "rtMemcpy error, ret: 0x%X", status);
                  return RT_ERROR_TO_GE_STATUS(status);)
  return SUCCESS;
}

Status CpuTaskZeroCopy::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskZeroCopy, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ZeroCopy failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch zero copy task success.");
  return SUCCESS;
}
CpuTaskZeroCopy::~CpuTaskZeroCopy() {
  if (src_addr_ == nullptr && dst_addr_ == nullptr) {
    return;
  }

  if (src_addr_ != nullptr) {
    rtError_t status = rtFree(src_addr_);
    if (status != RT_ERROR_NONE) {
      GELOGW("Call rt free failed, status: 0x%x", status);
    }
  }
  if (dst_addr_ != nullptr) {
    rtError_t status = rtFree(dst_addr_);
    if (status != RT_ERROR_NONE) {
      GELOGW("Call rt free failed, status: 0x%x", status);
    }
  }
  src_addr_ = nullptr;
  dst_addr_ = nullptr;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind output queue to task.
/// @param [in] addr: NetOutput Op input tensor address.
/// @param [in] size: NetOutput Op input tensor size.
/// @param [in] in_mbuf: input mbuf addr for input data.
/// @param [out] out_mbuf: output mbuf addr for output data.
/// @return: 0 for success / others for failed
///
Status CpuTaskPrepareOutput::Init(uintptr_t addr, uint32_t size, uintptr_t in_mbuf, uintptr_t &out_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(PrepareOutputInfo) + sizeof(uintptr_t);  // sizeof(uintptr_t) for save out_mbuf.
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  out_mbuf = reinterpret_cast<uintptr_t>(args_) + sizeof(PrepareOutputInfo);
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  // Get NetOutput Input address and bind to queue.
  PrepareOutputInfo prepare;
  prepare.data_size = size;
  prepare.data_addr = addr;
  prepare.in_mbuf = in_mbuf;
  prepare.out_mbuf = out_mbuf;  // Placeholder, output mbuf addr will save to this place.

  status = rtMemcpy(args_, args_size_, &prepare, sizeof(PrepareOutputInfo), RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskPrepareOutput::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskPrepareOutput, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch PrepareOutput failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch prepare output task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, bind output queue to task.
/// @param [in] queue_id: output queue id from user.
/// @param [in] out_mbuf: mbuf for output data.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelEnqueue::Init(uint32_t queue_id, uintptr_t out_mbuf) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  // Get NetOutput Input address and bind to queue.
  args_size_ = sizeof(MbufQueueInfo);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  MbufQueueInfo queue_info;
  queue_info.queue_id = queue_id;
  queue_info.in_mbuf = out_mbuf;

  status = rtMemcpy(args_, args_size_, &queue_info, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelEnqueue::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelEnqueue, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelEnqueue failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch model enqueue task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, active entry stream.
/// @param [in] stream: stream to be active.
/// @return: 0 for success / others for failed
///
Status CpuTaskActiveEntry::Init(rtStream_t stream) {
  if (stream == nullptr) {
    GELOGE(FAILED, "Task active stream not valid");
    return FAILED;
  }

  active_stream_ = stream;
  return SUCCESS;
}

Status CpuTaskActiveEntry::Distribute() {
  if ((active_stream_ == nullptr) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t ret = rtStreamActive(active_stream_, stream_);
  if (ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt StreamActive failed, ret: 0x%X", ret);
    return RT_ERROR_TO_GE_STATUS(ret);
  }

  GELOGI("Cpu kernel launch active entry task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, wait for end graph.
/// @param [in] model_id: model id for wait end graph.
/// @return: 0 for success / others for failed
///
Status CpuTaskWaitEndGraph::Init(uint32_t model_id) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(model_id);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskWaitEndGraph::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskWaitEndGraph, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch WaitEndGraph failed, status: 0x%X", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch wait end task success.");
  return SUCCESS;
}
///
/// @ingroup ge
/// @brief definiteness queue schedule, repeat run model.
/// @param [in] model_id: model id for repeat run.
/// @return: 0 for success / others for failed
///
Status CpuTaskModelRepeat::Init(uint32_t model_id) {
  if ((args_ != nullptr) || (args_size_ > 0)) {
    GELOGE(FAILED, "Task already initialized, size: %u", args_size_);
    return FAILED;
  }

  args_size_ = sizeof(model_id);
  rtError_t status = rtMalloc(&args_, args_size_, RT_MEMORY_HBM);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt malloc failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "args data.", args_size_)

  status = rtMemcpy(args_, args_size_, &model_id, args_size_, RT_MEMCPY_HOST_TO_DEVICE);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt memcpy failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  return SUCCESS;
}

Status CpuTaskModelRepeat::Distribute() {
  if ((args_ == nullptr) || (args_size_ == 0) || (stream_ == nullptr)) {
    GELOGE(FAILED, "Task not initialized, distribute failed, size: %u", args_size_);
    return FAILED;
  }

  rtError_t status = rtCpuKernelLaunch(nullptr, kCpuTaskModelRepeat, kCoreDim, args_, args_size_, nullptr, stream_);
  if (status != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt CpuKernelLaunch ModelRepeat failed, status: 0x%x", status);
    return RT_ERROR_TO_GE_STATUS(status);
  }

  GELOGI("Cpu kernel launch repeat task success.");
  return SUCCESS;
}
}  // namespace ge
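
Taken together, these tasks describe one pass of the queue schedule: dequeue an input mbuf from the user's input queue, zero-copy its data onto the model's input addresses, prepare the output mbuf from the NetOutput tensor, enqueue it to the output queue, activate the entry stream, wait for the end-graph signal, and repeat the model. The sketch below shows how a caller might chain the Init/Distribute pairs on one stream. It is illustrative only: the helper BuildQueueSchedule and its parameters are not part of this file, each task's constructor is assumed to forward the stream the same way CpuTaskInfo(stream) does, and queue ids, the model id, streams, and the outside_addrs map are assumed to come from the surrounding model manager.

// Illustrative sketch (not part of this file): chain the CPU tasks above in the
// order the queue schedule implies. Error handling is reduced to status checks.
Status BuildQueueSchedule(rtStream_t stream, rtStream_t entry_stream, uint32_t input_queue_id,
                          uint32_t output_queue_id, uint32_t model_id, uintptr_t output_addr,
                          uint32_t output_size, std::map<const void *, ZeroCopyOffset> &outside_addrs) {
  uintptr_t in_mbuf = 0;
  uintptr_t out_mbuf = 0;

  CpuTaskModelDequeue dequeue_task(stream);   // pull the input mbuf from the input queue
  CpuTaskZeroCopy zero_copy_task(stream);     // map mbuf data onto model input addresses
  CpuTaskPrepareOutput prepare_task(stream);  // wrap the NetOutput tensor into an output mbuf
  CpuTaskModelEnqueue enqueue_task(stream);   // push the output mbuf to the output queue
  CpuTaskActiveEntry active_task(stream);     // activate the model's entry stream
  CpuTaskWaitEndGraph wait_task(stream);      // wait for the end-graph signal
  CpuTaskModelRepeat repeat_task(stream);     // schedule the next iteration

  // Initialize every task; each Init fills in the device-side args it later launches with.
  GE_CHK_STATUS_RET(dequeue_task.Init(input_queue_id, in_mbuf));
  std::vector<uintptr_t> mbuf_list{in_mbuf};
  GE_CHK_STATUS_RET(zero_copy_task.Init(mbuf_list, outside_addrs));
  GE_CHK_STATUS_RET(prepare_task.Init(output_addr, output_size, in_mbuf, out_mbuf));
  GE_CHK_STATUS_RET(enqueue_task.Init(output_queue_id, out_mbuf));
  GE_CHK_STATUS_RET(active_task.Init(entry_stream));
  GE_CHK_STATUS_RET(wait_task.Init(model_id));
  GE_CHK_STATUS_RET(repeat_task.Init(model_id));

  // Submit every task to the runtime in the same order it was initialized.
  GE_CHK_STATUS_RET(dequeue_task.Distribute());
  GE_CHK_STATUS_RET(zero_copy_task.Distribute());
  GE_CHK_STATUS_RET(prepare_task.Distribute());
  GE_CHK_STATUS_RET(enqueue_task.Distribute());
  GE_CHK_STATUS_RET(active_task.Distribute());
  GE_CHK_STATUS_RET(wait_task.Distribute());
  GE_CHK_STATUS_RET(repeat_task.Distribute());
  return SUCCESS;
}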

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph-optimization operations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; its detailed architecture is shown in the diagram below.