You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

hcom_util.cc 15 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/manager/util/hcom_util.h"
  17. #include "common/debug/log.h"
  18. #include "common/math/math_util.h"
  19. #include "common/op/attr_value_util.h"
  20. #include "common/op/ge_op_utils.h"
  21. #include "graph/utils/tensor_utils.h"
  22. #include "graph/utils/type_utils.h"
  23. namespace ge {
  24. Status HcomOmeUtil::GetHcclDataType(const ge::ConstOpDescPtr &op_desc,
  25. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  26. GE_CHECK_NOTNULL(op_desc);
  27. if (CheckKernelHcclInfo(op_desc, kernel_hccl_infos) != SUCCESS) {
  28. GELOGE(PARAM_INVALID, "HcomOmeUtil:: the number of GETaskKernelHcclInfo is invalid.");
  29. return PARAM_INVALID;
  30. }
  31. GELOGI("GetHcclDataType start, node[%s], opType[%s].", op_desc->GetName().c_str(), op_desc->GetType().c_str());
  32. if (op_desc->GetType() == HVDWAIT) {
  33. return SUCCESS;
  34. }
  35. ge::DataType src_data_type = ge::DT_FLOAT;
  36. for (size_t i = 0; i < kernel_hccl_infos.size(); i++) {
  37. if (op_desc->GetType() == HCOMRECEIVE) {
  38. bool ret = ge::AttrUtils::GetDataType(op_desc, HCOM_ATTR_DATA_TYPE, src_data_type);
  39. if (ret == false) {
  40. GELOGE(PARAM_INVALID, "op:HcomReceive, op desc no attr: dtype.");
  41. return PARAM_INVALID;
  42. }
  43. } else {
  44. auto input_desc_ptr = op_desc->GetInputDescPtr(i);
  45. GE_CHECK_NOTNULL(input_desc_ptr);
  46. src_data_type = input_desc_ptr->GetDataType();
  47. }
  48. auto iter = kConstOpHcclDataType.find(static_cast<int64_t>(src_data_type));
  49. if (iter == kConstOpHcclDataType.end()) {
  50. GELOGE(PARAM_INVALID,
  51. "HcomOmeUtil:: Node: %s Optype: %s HcomDataType cann't support! Current Davinci Data Type : %s",
  52. op_desc->GetName().c_str(), op_desc->GetType().c_str(),
  53. ge::TypeUtils::DataTypeToSerialString(src_data_type).c_str());
  54. return PARAM_INVALID;
  55. }
  56. kernel_hccl_infos[i].dataType = iter->second;
  57. }
  58. return SUCCESS;
  59. }
  60. Status HcomOmeUtil::GetHcclTypeSize(HcclDataType data_type, int32_t &size) {
  61. auto iter = kConstOpHcclDataTypeSize.find(data_type);
  62. GE_CHK_BOOL_EXEC(iter != kConstOpHcclDataTypeSize.end(), return PARAM_INVALID,
  63. "HcomOmeUtil::HcomDataTypeSize , No DataTypeSize!");
  64. size = iter->second;
  65. return SUCCESS;
  66. }
  67. Status HcomOmeUtil::GetHcomCount(const ge::ConstOpDescPtr &op_desc, HcclDataType data_type, bool is_allgather,
  68. int &count) {
  69. GE_CHECK_NOTNULL(op_desc);
  70. if (!IsHCOMOp(op_desc->GetType())) {
  71. GELOGE(PARAM_INVALID, "HcomOmeUtil:: operator is not Hcom operator.");
  72. return PARAM_INVALID;
  73. }
  74. int64_t total_size = 0;
  75. int64_t align_size = 512;
  76. int32_t size = 0;
  77. GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclTypeSize(data_type, size), "GetHcomCount: GetHcclTypeSize fail!");
  78. if (op_desc->GetType() == HCOMRECEIVE) {
  79. vector<int64_t> shape_dims;
  80. bool ret = ge::AttrUtils::GetListInt(op_desc, HCOM_ATTR_SHAPE, shape_dims);
  81. if (ret == false) {
  82. GELOGE(PARAM_INVALID, "op:HcomReceive, op desc no attr: shape.");
  83. return PARAM_INVALID;
  84. }
  85. ge::GeShape shape = ge::GeShape(shape_dims);
  86. int64_t input_size = shape.GetShapeSize() * size;
  87. total_size = (input_size + align_size - 1) / align_size * align_size;
  88. } else {
  89. for (size_t i = 0; i < op_desc->GetInputsSize(); i++) {
  90. int64_t input_size = 0;
  91. int64_t block_size = 0;
  92. GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(i));
  93. GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(*op_desc->GetInputDescPtr(i), input_size),
  94. "get size from TensorDesc failed, op : %s, input index : %zu", op_desc->GetName().c_str(), i);
  95. // dynamic shape hccl op get size from output tensor desc
  96. if (op_desc->HasAttr(ATTR_NAME_IS_UNKNOWN_SHAPE)) {
  97. GE_CHECK_NOTNULL(op_desc->GetOutputDescPtr(i));
  98. GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(*op_desc->GetOutputDescPtr(i), input_size),
  99. "get size from TensorDesc failed, op : %s, input index : %zu", op_desc->GetName().c_str(), i);
  100. }
  101. GE_IF_BOOL_EXEC(
  102. op_desc->GetType() == HCOMREDUCESCATTER, int32_t rank_size = 0;
  103. GE_CHK_BOOL_RET_STATUS(ge::AttrUtils::GetInt(op_desc, HCOM_ATTR_RANK_SIZE, rank_size), PARAM_INVALID,
  104. "get HCOM_ATTR_RANK_SIZE failed");
  105. GE_CHK_BOOL_RET_STATUS(rank_size != 0, PARAM_INVALID, "rank size is zero");
  106. int64_t shape_size = op_desc->GetInputDescPtr(i)->GetShape().GetShapeSize(); GE_CHK_STATUS_RET(
  107. ge::CheckInt64Uint32MulOverflow(shape_size, size), "Product of shape size and size beyond INT64_MAX");
  108. block_size = (shape_size * size) / rank_size;
  109. GE_CHK_STATUS_RET(ge::CheckInt64AddOverflow(total_size, block_size), "Total size is beyond the INT64_MAX");
  110. total_size = total_size + block_size; continue;);
  111. int64_t shape_size = op_desc->GetInputDescPtr(i)->GetShape().GetShapeSize();
  112. GELOGD("hcom util node %s inputsize %ld, shapesize %ld, datasize %d.",
  113. op_desc->GetName().c_str(), input_size, shape_size, size);
  114. GE_CHK_STATUS_RET(ge::CheckInt64Int32MulOverflow(shape_size, size),
  115. "Product of shape size and size beyond INT64_MAX");
  116. GE_IF_BOOL_EXEC(is_allgather, block_size = shape_size * size;);
  117. GE_IF_BOOL_EXEC(!is_allgather, block_size = (input_size + align_size - 1) / align_size * align_size;);
  118. GE_CHK_STATUS_RET(ge::CheckInt64AddOverflow(total_size, block_size), "Total size is beyond the INT64_MAX");
  119. total_size = total_size + block_size;
  120. }
  121. }
  122. GE_CHK_BOOL_RET_STATUS(size != 0, PARAM_INVALID, "Size is zero");
  123. count = static_cast<int>(total_size / size);
  124. GE_CHK_BOOL_EXEC(total_size % size == 0, return PARAM_INVALID, "total_size:%ld is not divisiable by size:%d.",
  125. total_size, size);
  126. return SUCCESS;
  127. }
  128. Status HcomOmeUtil::GetHorovodCount(const ge::ConstOpDescPtr &op_desc,
  129. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  130. GE_CHECK_NOTNULL(op_desc);
  131. if (!IsHorovodOp(op_desc->GetType())) {
  132. GELOGE(PARAM_INVALID, "HcomOmeUtil:: operator is not Horovod operator.");
  133. return PARAM_INVALID;
  134. }
  135. int64_t align_size = 512;
  136. int32_t size = 0;
  137. for (size_t i = 0; i < op_desc->GetInputsSize(); i++) {
  138. GE_CHK_STATUS_RET(HcomOmeUtil::GetHcclTypeSize(static_cast<HcclDataType>(kernel_hccl_infos[i].dataType), size),
  139. "GetHorovodCount: GetHcclTypeSize fail!");
  140. int64_t input_size = 0;
  141. int64_t block_size = 0;
  142. GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(i));
  143. GE_CHK_STATUS_RET(ge::TensorUtils::GetSize(*op_desc->GetInputDescPtr(i), input_size),
  144. "get size from TensorDesc failed, op : %s, input index : %zu", op_desc->GetName().c_str(), i);
  145. int64_t shape_size = op_desc->GetInputDescPtr(i)->GetShape().GetShapeSize();
  146. GE_CHK_STATUS_RET(ge::CheckInt64Int32MulOverflow(shape_size, size),
  147. "Product of shape size and size beyond INT64_MAX");
  148. if (kernel_hccl_infos[0].hccl_type == HVDCALLBACKALLGATHER) {
  149. block_size = shape_size * size;
  150. } else {
  151. block_size = (input_size + align_size - 1) / align_size * align_size;
  152. }
  153. GE_CHK_BOOL_RET_STATUS(size != 0, PARAM_INVALID, "Size is zero");
  154. GE_CHK_BOOL_EXEC(block_size % size == 0, return PARAM_INVALID, "block_size:%ld is not divisiable by size:%d.",
  155. block_size, size);
  156. kernel_hccl_infos[i].count = static_cast<int>(block_size / size);
  157. }
  158. return SUCCESS;
  159. }
  160. Status HcomOmeUtil::GetHcclCount(const ge::ConstOpDescPtr &op_desc,
  161. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  162. GE_CHECK_NOTNULL(op_desc);
  163. Status ret;
  164. ret = CheckKernelHcclInfo(op_desc, kernel_hccl_infos);
  165. if (ret != SUCCESS) {
  166. GELOGE(PARAM_INVALID, "HcomOmeUtil:: the number of GETaskKernelHcclInfo is invalid.");
  167. return PARAM_INVALID;
  168. }
  169. GELOGI("GetHcclCount start, node[%s], opType[%s].", op_desc->GetName().c_str(), op_desc->GetType().c_str());
  170. if (IsHCOMOp(op_desc->GetType())) {
  171. int32_t count = 0;
  172. ret = GetHcomCount(op_desc, static_cast<HcclDataType>(kernel_hccl_infos[0].dataType),
  173. kernel_hccl_infos[0].hccl_type == HCOMALLGATHER, count);
  174. if (ret != SUCCESS) {
  175. GELOGE(ret, "HcomOmeUtil:: Node: %s Optype: %s get the Hcom operator hccl count fail.",
  176. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  177. return PARAM_INVALID;
  178. }
  179. kernel_hccl_infos[0].count = count;
  180. }
  181. if (IsHorovodOp(op_desc->GetType())) {
  182. ret = GetHorovodCount(op_desc, kernel_hccl_infos);
  183. if (ret != SUCCESS) {
  184. GELOGE(PARAM_INVALID, "HcomOmeUtil:: Node: %s Optype: %s get the Horovod hccl operator count fail.",
  185. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  186. return PARAM_INVALID;
  187. }
  188. }
  189. return SUCCESS;
  190. }
  191. Status HcomOmeUtil::GetHcclOperationType(const ge::ConstOpDescPtr &op_desc, HcclReduceOp &op_type) {
  192. GE_CHECK_NOTNULL(op_desc);
  193. if (IsHCOMOp(op_desc->GetType())) {
  194. std::string hcom_op_type;
  195. GE_CHK_BOOL_EXEC(ge::AttrUtils::GetStr(op_desc, HCOM_ATTR_REDUCE_TYPE, hcom_op_type), return PARAM_INVALID,
  196. "HcomOmeUtil:: Node: %s Optype: %s Get HCOM_ATTR_REDUCE_TYPE fail, not support!",
  197. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  198. if (hcom_op_type == "min") {
  199. op_type = HCCL_REDUCE_MIN;
  200. } else if (hcom_op_type == "max") {
  201. op_type = HCCL_REDUCE_MAX;
  202. } else if (hcom_op_type == "prod") {
  203. op_type = HCCL_REDUCE_PROD;
  204. } else if (hcom_op_type == "sum") {
  205. op_type = HCCL_REDUCE_SUM;
  206. } else {
  207. GELOGE(PARAM_INVALID, "HcomOmeUtil::Get HCOM_ATTR_REDUCE_TYPE fail, [%s] not support!", hcom_op_type.c_str());
  208. return PARAM_INVALID;
  209. }
  210. }
  211. if (IsHorovodOp(op_desc->GetType())) {
  212. int64_t horovod_op_type;
  213. GE_CHK_BOOL_EXEC(ge::AttrUtils::GetInt(op_desc, ATTR_HOROVOD_ATTR_REDUCE_TYPE, horovod_op_type),
  214. return PARAM_INVALID,
  215. "HcomOmeUtil:: Node: %s Optype: %s Get ATTR_HOROVOD_ATTR_REDUCE_TYPE fail, not support!",
  216. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  217. auto iter = kHorovodRedOpToHcclRedOp.find(static_cast<HorovodReduceOp>(horovod_op_type));
  218. if (iter == kHorovodRedOpToHcclRedOp.end()) {
  219. GELOGE(PARAM_INVALID, "HcomOmeUtil:: Node: %s Optype: %s HcomOpType cann't support! Current HcomOpType : %ld",
  220. op_desc->GetName().c_str(), op_desc->GetType().c_str(), horovod_op_type);
  221. return PARAM_INVALID;
  222. }
  223. op_type = iter->second;
  224. }
  225. return SUCCESS;
  226. }
  227. Status HcomOmeUtil::GetHcclRootId(const ge::ConstOpDescPtr &op_desc, int64_t &root_id) {
  228. GE_CHECK_NOTNULL(op_desc);
  229. GE_CHK_BOOL_EXEC(ge::AttrUtils::GetInt(op_desc, HCOM_ATTR_ROOT_RANK, root_id), return PARAM_INVALID,
  230. "HcomOmeUtil::Node %s Optype: %s Get HCOM_ATTR_ROOT_INDEX fail, not support!",
  231. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  232. return SUCCESS;
  233. }
  234. Status HcomOmeUtil::GetAllRootId(const ge::ConstOpDescPtr &op_desc,
  235. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  236. GE_CHECK_NOTNULL(op_desc);
  237. if (op_desc->GetType() == HCOMBROADCAST || op_desc->GetType() == HVDCALLBACKBROADCAST || op_desc->GetType() == HCOMREDUCE) {
  238. GELOGI("GetAllRootId Node[%s] opType[%s] get hccl rootId.", op_desc->GetName().c_str(), op_desc->GetType().c_str());
  239. int64_t root_id = 0;
  240. Status dmrt = GetHcclRootId(op_desc, root_id);
  241. if (dmrt != SUCCESS) {
  242. GELOGE(FAILED, "davinci_model: GetHcomRootId fail! domi error: %u", dmrt);
  243. return FAILED;
  244. }
  245. for (size_t i = 0; i < kernel_hccl_infos.size(); i++) {
  246. kernel_hccl_infos[i].rootId = root_id;
  247. }
  248. }
  249. return SUCCESS;
  250. }
  251. bool HcomOmeUtil::IsHCOMOp(const string &op_type) {
  252. return (op_type == HCOMALLREDUCE) || (op_type == HCOMALLGATHER) || (op_type == HCOMBROADCAST) ||
  253. (op_type == HCOMSEND) || (op_type == HCOMRECEIVE) || (op_type == HCOMREDUCESCATTER) || (op_type == HCOMREDUCE);
  254. }
  255. bool HcomOmeUtil::IsHorovodOp(const string &op_type) {
  256. return (op_type == HVDCALLBACKALLREDUCE) || (op_type == HVDCALLBACKALLGATHER) || (op_type == HVDCALLBACKBROADCAST) ||
  257. (op_type == HVDWAIT);
  258. }
  259. Status HcomOmeUtil::CheckKernelHcclInfo(const ge::ConstOpDescPtr &op_desc,
  260. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  261. GE_CHECK_NOTNULL(op_desc);
  262. if (IsHCOMOp(op_desc->GetType()) && kernel_hccl_infos.size() != 1) {
  263. GELOGE(PARAM_INVALID, "HcomOmeUtil:: in Hcom scenario, the number of GETaskKernelHcclInfo is invalid.");
  264. return PARAM_INVALID;
  265. }
  266. if (IsHorovodOp(op_desc->GetType())) {
  267. if (op_desc->GetType() == HVDWAIT) {
  268. return SUCCESS;
  269. }
  270. if (kernel_hccl_infos.empty() || op_desc->GetInputsSize() != kernel_hccl_infos.size()) {
  271. GELOGE(PARAM_INVALID, "HcomOmeUtil:: in Horovod scenario, the number of GETaskKernelHcclInfo is invalid.");
  272. return PARAM_INVALID;
  273. }
  274. }
  275. return SUCCESS;
  276. }
  277. void HcomOmeUtil::GetHcclType(const domi::TaskDef &task_def, std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  278. auto hccl_def = task_def.kernel_hccl();
  279. std::string hccl_type = hccl_def.hccl_type();
  280. for (size_t i = 0; i < kernel_hccl_infos.size(); i++) {
  281. kernel_hccl_infos[i].hccl_type = hccl_type;
  282. }
  283. }
  284. Status HcomOmeUtil::GetHorovodInputs(const ge::ConstOpDescPtr &op_desc,
  285. std::vector<GETaskKernelHcclInfo> &kernel_hccl_infos) {
  286. GE_CHECK_NOTNULL(op_desc);
  287. if (!IsHorovodOp(op_desc->GetType())) {
  288. return SUCCESS;
  289. }
  290. if (CheckKernelHcclInfo(op_desc, kernel_hccl_infos) != SUCCESS) {
  291. GELOGE(PARAM_INVALID, "HcomOmeUtil:: Node: %s Optype: %s the number of GETaskKernelHcclInfo is invalid.",
  292. op_desc->GetName().c_str(), op_desc->GetType().c_str());
  293. return PARAM_INVALID;
  294. }
  295. if (op_desc->GetType() == HVDWAIT) {
  296. return SUCCESS;
  297. }
  298. for (size_t i = 0; i < op_desc->GetInputsSize(); i++) {
  299. ConstGeTensorDescPtr input_desc = op_desc->GetInputDescPtr(i);
  300. GETaskKernelHcclInfo &kernel_hccl_info = kernel_hccl_infos.at(i);
  301. kernel_hccl_info.input_name = op_desc->GetInputNameByIndex(i);
  302. kernel_hccl_info.dims = input_desc->GetShape().GetDims();
  303. }
  304. return SUCCESS;
  305. }
  306. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示