You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

build_task_utils.cc 4.1 kB

5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "single_op/task/build_task_utils.h"
  17. #include "runtime/rt.h"
  18. #include "graph/load/new_model_manager/model_utils.h"
  19. #include "graph/manager/graph_var_manager.h"
  20. #include "graph/utils/type_utils.h"
  21. #include "framework/common/debug/ge_log.h"
  22. #include "framework/common/types.h"
  23. namespace ge {
namespace {
// A single-op execution is not attached to a real session, so a sentinel
// session id is used when filling RuntimeParam below.
const uint64_t kSessionId = UINT64_MAX;
// Single ops reference no graph variables, so the variable memory region
// passed to ModelUtils is empty: null base, zero logical base, zero size.
uint8_t *kVarBase = nullptr;
const uint64_t kLogicVarBase = 0;
const uint64_t kVarSize = 0;
}  // namespace
  30. std::vector<std::vector<void *>> BuildTaskUtils::GetAddresses(const OpDescPtr &op_desc,
  31. const SingleOpModelParam &param) {
  32. std::vector<std::vector<void *>> ret;
  33. RuntimeParam runtime_para;
  34. runtime_para.mem_size = param.memory_size;
  35. runtime_para.logic_mem_base = param.base_addr;
  36. runtime_para.mem_base = param.mem_base;
  37. runtime_para.weight_size = param.weight_size;
  38. runtime_para.logic_weight_base = param.weight_addr;
  39. runtime_para.weight_base = param.weight_base;
  40. runtime_para.var_size = kVarSize;
  41. runtime_para.logic_var_base = kLogicVarBase;
  42. runtime_para.var_base = kVarBase;
  43. runtime_para.session_id = kSessionId;
  44. runtime_para.is_single_op = true;
  45. ret.emplace_back(ModelUtils::GetInputDataAddrs(runtime_para, op_desc));
  46. ret.emplace_back(ModelUtils::GetOutputDataAddrs(runtime_para, op_desc));
  47. ret.emplace_back(ModelUtils::GetWorkspaceDataAddrs(runtime_para, op_desc));
  48. return ret;
  49. }
  50. std::vector<void *> BuildTaskUtils::JoinAddresses(const std::vector<std::vector<void *>> &addresses) {
  51. std::vector<void *> ret;
  52. for (auto &address : addresses) {
  53. ret.insert(ret.end(), address.begin(), address.end());
  54. }
  55. return ret;
  56. }
  57. std::vector<void *> BuildTaskUtils::GetKernelArgs(const OpDescPtr &op_desc,
  58. const SingleOpModelParam &param) {
  59. auto addresses = GetAddresses(op_desc, param);
  60. return JoinAddresses(addresses);
  61. }
  62. std::string BuildTaskUtils::GetTaskInfo(const OpDescPtr &op_desc) {
  63. std::stringstream ss;
  64. if (op_desc != nullptr) {
  65. auto op_type = op_desc->GetType();
  66. if (op_type == ge::NETOUTPUT || op_type == ge::DATA) {
  67. return ss.str();
  68. }
  69. // Conv2D IN[DT_FLOAT16 NC1HWC0[256, 128, 7, 7, 16],DT_FLOAT16 FRACTAL_Z[128, 32, 16, 16]]
  70. // OUT[DT_FLOAT16 NC1HWC0[256, 32, 7, 7, 16]]
  71. ss << op_type << " IN[";
  72. for (uint32_t idx = 0; idx < op_desc->GetAllInputsSize(); idx++) {
  73. const GeTensorDescPtr &input = op_desc->MutableInputDesc(idx);
  74. if (input == nullptr) {
  75. continue;
  76. }
  77. ss << TypeUtils::DataTypeToSerialString(input->GetDataType()) << " ";
  78. ss << TypeUtils::FormatToSerialString(input->GetFormat());
  79. ss << VectorToString(input->GetShape().GetDims());
  80. if (idx < op_desc->GetInputsSize() - 1) {
  81. ss << ",";
  82. }
  83. }
  84. ss << "] OUT[";
  85. for (uint32_t idx = 0; idx < op_desc->GetOutputsSize(); idx++) {
  86. const GeTensorDescPtr &output = op_desc->MutableOutputDesc(idx);
  87. ss << TypeUtils::DataTypeToSerialString(output->GetDataType()) << " ";
  88. Format out_format = output->GetFormat();
  89. const GeShape &out_shape = output->GetShape();
  90. const auto &dims = out_shape.GetDims();
  91. ss << TypeUtils::FormatToSerialString(out_format);
  92. ss << VectorToString(dims);
  93. if (idx < op_desc->GetOutputsSize() - 1) {
  94. ss << ",";
  95. }
  96. }
  97. ss << "]\n";
  98. }
  99. return ss.str();
  100. }
  101. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示