You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

build_task_utils.cc 6.3 kB

5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "single_op/task/build_task_utils.h"
  17. #include "runtime/rt.h"
  18. #include "graph/load/model_manager/model_utils.h"
  19. #include "graph/manager/graph_var_manager.h"
  20. #include "graph/utils/type_utils.h"
  21. #include "framework/common/debug/ge_log.h"
  22. #include "framework/common/types.h"
  23. namespace ge {
  24. namespace {
  25. const uint64_t kSessionId = UINT64_MAX;
  26. uint8_t *kVarBase = nullptr;
  27. const uint64_t kLogicVarBase = 0;
  28. const uint64_t kVarSize = 0;
  29. }
  30. std::vector<std::vector<void *>> BuildTaskUtils::GetAddresses(const OpDescPtr &op_desc,
  31. const SingleOpModelParam &param,
  32. bool keep_workspace) {
  33. std::vector<std::vector<void *>> ret;
  34. RuntimeParam runtime_para;
  35. runtime_para.mem_size = param.memory_size;
  36. runtime_para.logic_mem_base = param.base_addr;
  37. runtime_para.mem_base = param.mem_base;
  38. runtime_para.weight_size = param.weight_size;
  39. runtime_para.logic_weight_base = param.weight_addr;
  40. runtime_para.weight_base = param.weight_base;
  41. runtime_para.var_size = kVarSize;
  42. runtime_para.logic_var_base = kLogicVarBase;
  43. runtime_para.var_base = kVarBase;
  44. runtime_para.session_id = kSessionId;
  45. runtime_para.is_single_op = true;
  46. ret.emplace_back(ModelUtils::GetInputDataAddrs(runtime_para, op_desc));
  47. ret.emplace_back(ModelUtils::GetOutputDataAddrs(runtime_para, op_desc));
  48. if (keep_workspace) {
  49. ret.emplace_back(ModelUtils::GetWorkspaceDataAddrs(runtime_para, op_desc));
  50. }
  51. return ret;
  52. }
  53. std::vector<void *> BuildTaskUtils::JoinAddresses(const std::vector<std::vector<void *>> &addresses) {
  54. std::vector<void *> ret;
  55. for (auto &address : addresses) {
  56. ret.insert(ret.end(), address.begin(), address.end());
  57. }
  58. return ret;
  59. }
  60. std::vector<void *> BuildTaskUtils::GetKernelArgs(const OpDescPtr &op_desc,
  61. const SingleOpModelParam &param) {
  62. auto addresses = GetAddresses(op_desc, param);
  63. return JoinAddresses(addresses);
  64. }
  65. std::string BuildTaskUtils::InnerGetTaskInfo(const OpDescPtr &op_desc,
  66. const std::vector<const void *> &input_addrs,
  67. const std::vector<const void *> &output_addrs) {
  68. std::stringstream ss;
  69. if (op_desc != nullptr) {
  70. auto op_type = op_desc->GetType();
  71. if (op_type == ge::NETOUTPUT || op_type == ge::DATA) {
  72. return ss.str();
  73. }
  74. // Conv2D IN[DT_FLOAT16 NC1HWC0[256, 128, 7, 7, 16],DT_FLOAT16 FRACTAL_Z[128, 32, 16, 16]]
  75. // OUT[DT_FLOAT16 NC1HWC0[256, 32, 7, 7, 16]]
  76. ss << op_type << " IN[";
  77. for (uint32_t idx = 0; idx < op_desc->GetAllInputsSize(); idx++) {
  78. const GeTensorDescPtr &input = op_desc->MutableInputDesc(idx);
  79. if (input == nullptr) {
  80. continue;
  81. }
  82. ss << TypeUtils::DataTypeToSerialString(input->GetDataType()) << " ";
  83. ss << TypeUtils::FormatToSerialString(input->GetFormat());
  84. ss << VectorToString(input->GetShape().GetDims()) << " ";
  85. if (idx < input_addrs.size()) {
  86. ss << input_addrs[idx];
  87. }
  88. if (idx < op_desc->GetInputsSize() - 1) {
  89. ss << ",";
  90. }
  91. }
  92. ss << "] OUT[";
  93. for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx++) {
  94. const GeTensorDescPtr &output = op_desc->MutableOutputDesc(idx);
  95. ss << TypeUtils::DataTypeToSerialString(output->GetDataType()) << " ";
  96. Format out_format = output->GetFormat();
  97. const GeShape &out_shape = output->GetShape();
  98. const auto &dims = out_shape.GetDims();
  99. ss << TypeUtils::FormatToSerialString(out_format);
  100. ss << VectorToString(dims) << " ";
  101. if (idx < output_addrs.size()) {
  102. ss << output_addrs[idx];
  103. }
  104. if (idx < op_desc->GetOutputsSize() - 1) {
  105. ss << ",";
  106. }
  107. }
  108. ss << "]\n";
  109. }
  110. return ss.str();
  111. }
  112. std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) {
  113. vector<const void *> input_addrs;
  114. vector<const void *> output_addrs;
  115. return InnerGetTaskInfo(op_desc, input_addrs, output_addrs);
  116. }
  117. std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc,
  118. const std::vector<DataBuffer> &inputs,
  119. const std::vector<DataBuffer> &outputs) {
  120. vector<const void *> input_addrs;
  121. vector<const void *> output_addrs;
  122. GE_CHECK_NOTNULL_EXEC(op_desc, return "");
  123. if (op_desc->GetAllInputsSize() == inputs.size()) {
  124. std::for_each(inputs.begin(), inputs.end(), [&](const DataBuffer &db) { input_addrs.push_back(db.data); });
  125. }
  126. if (op_desc->GetOutputsSize() == outputs.size()) {
  127. std::for_each(outputs.begin(), outputs.end(), [&](const DataBuffer &db) { output_addrs.push_back(db.data); });
  128. }
  129. return InnerGetTaskInfo(op_desc, input_addrs, output_addrs);
  130. }
  131. std::string BuildTaskUtils::GetTaskInfo(const hybrid::TaskContext &task_context) {
  132. auto &node_item = task_context.GetNodeItem();
  133. auto op_desc = node_item.GetOpDesc();
  134. GE_CHECK_NOTNULL_EXEC(op_desc, return "");
  135. vector<const void *> input_addrs;
  136. vector<const void *> output_addrs;
  137. if (op_desc->GetAllInputsSize() == static_cast<uint32_t>(task_context.NumInputs())) {
  138. for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
  139. input_addrs.push_back(task_context.GetInput(i)->GetData());
  140. }
  141. }
  142. if (op_desc->GetOutputsSize() == static_cast<uint32_t>(task_context.NumOutputs())) {
  143. for (size_t i = 0; i < op_desc->GetOutputsSize(); ++i) {
  144. output_addrs.push_back(task_context.GetOutput(i)->GetData());
  145. }
  146. }
  147. return InnerGetTaskInfo(op_desc, input_addrs, output_addrs);
  148. }
  149. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示