tbe_task.cc
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ge_runtime/task/tbe_task.h"

#include <vector>

#include "ge_runtime/task/task_factory.h"

namespace ge {
namespace model_runner {
TbeTask::TbeTask(const ModelContext &model_context, const std::shared_ptr<TbeTaskInfo> &task_info)
    : TaskRepeater<TbeTaskInfo>(model_context, task_info),
      task_info_(task_info),
      stream_(nullptr),
      stub_func_(nullptr),
      args_(nullptr) {
  if (task_info_ == nullptr) {
    GELOGW("task_info_ is null!");
    return;  // Without a valid task_info the stream cannot be resolved below.
  }

  // Bind the task to its stream: a single-stream model shares that stream,
  // otherwise the task's stream_id indexes into the model's stream list.
  auto stream_list = model_context.stream_list();
  if (stream_list.size() == 1) {
    stream_ = stream_list[0];
  } else if (stream_list.size() > task_info->stream_id()) {
    stream_ = stream_list[task_info->stream_id()];
  } else {
    GELOGE(PARAM_INVALID, "Index: %u >= stream_list.size(): %zu.", task_info->stream_id(), stream_list.size());
    return;
  }
}

TbeTask::~TbeTask() {
  // Release the device-side argument buffer allocated in Distribute().
  if (args_ != nullptr) {
    rtError_t rt_ret = rtFree(args_);
    if (rt_ret != RT_ERROR_NONE) {
      GELOGE(RT_FAILED, "rtFree fwkOpBuf failed! ret: 0x%X.", rt_ret);
    }
    args_ = nullptr;
  }
}

bool TbeTask::Distribute() {
  GELOGI("InitTbeTask start.");
  if (stream_ == nullptr) {
    GELOGE(PARAM_INVALID, "stream_ is null!");
    return false;
  }

  // Get stub_func
  if (task_info_->stub_func().empty()) {
    GELOGE(PARAM_INVALID, "kernel_info->stub_func is empty!");
    return false;
  }

  rtError_t rt_ret = rtGetFunctionByName(const_cast<char *>(task_info_->stub_func().c_str()), &stub_func_);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "rtGetFunctionByName failed, ret: %d", static_cast<int32_t>(rt_ret));
    stub_func_ = nullptr;
    return false;
  }
  GELOGI("TbeTask: stub_func = %s [%p].", task_info_->stub_func().c_str(), stub_func_);

  // Get args: pack input, output and workspace device addresses into one
  // contiguous buffer that is copied to device memory for the kernel launch.
  std::vector<void *> tensor_device_addrs;
  tensor_device_addrs.insert(tensor_device_addrs.end(), task_info_->input_data_addrs().begin(),
                             task_info_->input_data_addrs().end());
  tensor_device_addrs.insert(tensor_device_addrs.end(), task_info_->output_data_addrs().begin(),
                             task_info_->output_data_addrs().end());
  tensor_device_addrs.insert(tensor_device_addrs.end(), task_info_->workspace_addrs().begin(),
                             task_info_->workspace_addrs().end());
  auto args_size = static_cast<uint32_t>(tensor_device_addrs.size() * sizeof(void *));

  rt_ret = rtMalloc(&args_, args_size, RT_MEMORY_HBM);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "rtMalloc failed, ret: %d", static_cast<int32_t>(rt_ret));
    return false;
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, "task args data.", args_size)

  rt_ret = rtMemcpy(args_, args_size, reinterpret_cast<void *>(tensor_device_addrs.data()), args_size,
                    RT_MEMCPY_HOST_TO_DEVICE);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "rtMemcpy fail, ret 0x%X.", rt_ret);
    return false;
  }

  GELOGI("DistributeTbeTask start.");
  auto dump_flag = task_info_->dump_flag() ? RT_KERNEL_DUMPFLAG : RT_KERNEL_DEFAULT;
  rt_ret = rtKernelLaunchWithFlag(stub_func_, task_info_->block_dim(), args_, args_size, nullptr, stream_, dump_flag);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "Call rt api rtKernelLaunch failed, ret: 0x%X", rt_ret);
    return false;
  }
  GELOGI("[DataDump] task name:%s, dump_flag:%d", task_info_->op_name().c_str(), dump_flag);
  return true;
}

REGISTER_TASK(TaskInfoType::TBE, TbeTask, TbeTaskInfo);
}  // namespace model_runner
}  // namespace ge
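The REGISTER_TASK line at the end of the file hooks TbeTask into a type-keyed task factory: the model runner looks up the creator registered for TaskInfoType::TBE, builds a TbeTask from the incoming TbeTaskInfo, and then calls Distribute() to launch the kernel. The sketch below is a minimal, self-contained illustration of that registration pattern only; TaskFactory, REGISTER_DEMO_TASK, and DemoTbeTask are simplified stand-ins invented for this example, not the actual declarations in ge_runtime/task/task_factory.h.

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <utility>

enum class TaskInfoType { TBE };

struct TaskInfo { virtual ~TaskInfo() = default; };
struct TbeTaskInfo : TaskInfo {};

struct Task {
  virtual ~Task() = default;
  virtual bool Distribute() = 0;
};

// Type-keyed factory: maps a TaskInfoType to a creator callback.
class TaskFactory {
 public:
  using Creator = std::function<std::shared_ptr<Task>(const std::shared_ptr<TaskInfo> &)>;
  static TaskFactory &Instance() {
    static TaskFactory factory;
    return factory;
  }
  void Register(TaskInfoType type, Creator creator) { creators_[type] = std::move(creator); }
  std::shared_ptr<Task> Create(TaskInfoType type, const std::shared_ptr<TaskInfo> &info) const {
    auto it = creators_.find(type);
    return it == creators_.end() ? nullptr : it->second(info);
  }

 private:
  std::map<TaskInfoType, Creator> creators_;
};

// Registration macro: a file-scope helper object registers the creator at load
// time, mirroring what REGISTER_TASK(TaskInfoType::TBE, TbeTask, TbeTaskInfo) does.
#define REGISTER_DEMO_TASK(type, task_clazz, info_clazz)                              \
  static struct task_clazz##Registerer {                                              \
    task_clazz##Registerer() {                                                        \
      TaskFactory::Instance().Register(type, [](const std::shared_ptr<TaskInfo> &i) { \
        return std::make_shared<task_clazz>(std::static_pointer_cast<info_clazz>(i)); \
      });                                                                             \
    }                                                                                 \
  } g_##task_clazz##_registerer

// Toy stand-in for TbeTask: the real Distribute() resolves the stub function,
// copies the kernel arguments to device memory and launches the kernel on a stream.
class DemoTbeTask : public Task {
 public:
  explicit DemoTbeTask(std::shared_ptr<TbeTaskInfo> info) : info_(std::move(info)) {}
  bool Distribute() override {
    std::cout << "launch TBE kernel" << std::endl;
    return info_ != nullptr;
  }

 private:
  std::shared_ptr<TbeTaskInfo> info_;
};
REGISTER_DEMO_TASK(TaskInfoType::TBE, DemoTbeTask, TbeTaskInfo);

int main() {
  std::shared_ptr<TaskInfo> info = std::make_shared<TbeTaskInfo>();
  auto task = TaskFactory::Instance().Create(TaskInfoType::TBE, info);
  return (task != nullptr && task->Distribute()) ? 0 : 1;
}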

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware and bridges the two. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.