
hcom_ops.h 9.6 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file hcom_ops.h
 * \brief Huawei Collective Communication Library (HCCL) ops.
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
 * @brief Outputs a tensor gathering all input tensors.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32, int64,
 * uint64, float16, float32.
 * @par Attributes:
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllGather)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .REQUIRED_ATTR(rank_size, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllGather)
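/*
 * Usage sketch (illustrative, not part of this header): constructing an
 * HcomAllGather node with the GE graph-construction API. The class
 * ge::op::HcomAllGather and its set_input_/set_attr_ setters are assumed to be
 * generated from the registration above (via "all_ops.h"); attribute values
 * are placeholders.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Graph BuildAllGatherGraph() {
 *     ge::Graph graph("allgather_graph");
 *     auto data = ge::op::Data("data").set_attr_index(0);
 *     auto all_gather = ge::op::HcomAllGather("all_gather")
 *                           .set_input_x(data)
 *                           .set_attr_rank_size(8)                // ranks in the group
 *                           .set_attr_group("hccl_world_group");  // default world group
 *     // Conceptually, "y" concatenates the "x" tensors contributed by all 8 ranks.
 *     std::vector<ge::Operator> inputs{data};
 *     std::vector<ge::Operator> outputs{all_gather};
 *     graph.SetInputs(inputs).SetOutputs(outputs);
 *     return graph;
 *   }
 */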
/**
 * @brief Outputs a tensor containing the reduction across all input tensors
 * passed to the op.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32, float16,
 * float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li fusion: An optional integer identifying the fusion flag of the op.
 * 0: no fusion; 1 (default): fusion; 2: fuse ops by fusion id.
 * @li fusion_id: An optional integer identifying the fusion id of the op.
 * HcomAllReduce ops with the same fusion id will be fused.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomAllReduce)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .ATTR(fusion, Int, 1)
    .ATTR(fusion_id, Int, -1)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomAllReduce)
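/*
 * Usage sketch (illustrative, not part of this header): an all-reduce node that
 * sums the "x" tensors of all ranks in "hccl_world_group". Setter names follow
 * the REG_OP code-generation convention and are assumptions; the fusion
 * attributes are shown with their default values.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Graph BuildAllReduceGraph() {
 *     ge::Graph graph("allreduce_graph");
 *     auto grad = ge::op::Data("grad").set_attr_index(0);
 *     auto all_reduce = ge::op::HcomAllReduce("all_reduce")
 *                           .set_input_x(grad)
 *                           .set_attr_reduction("sum")           // "sum", "max", "min" or "prod"
 *                           .set_attr_group("hccl_world_group")
 *                           .set_attr_fusion(1)                  // default: fuse
 *                           .set_attr_fusion_id(-1);
 *     std::vector<ge::Operator> inputs{grad};
 *     std::vector<ge::Operator> outputs{all_reduce};
 *     graph.SetInputs(inputs).SetOutputs(outputs);
 *     return graph;
 *   }
 */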
/**
 * @brief Broadcasts the input tensor from the root rank to all ranks.
 * @par Inputs:
 * x: A list of dynamic input tensors. Must be one of the following types:
 * int8, int16, int32, int64, uint64, float16, float32.
 * @par Attributes:
 * @li root_rank: A required integer identifying the root rank in the op;
 * the input of this rank will be broadcast to the other ranks.
 * @li fusion: An optional integer identifying whether the op needs to be fused;
 * the default value is no fusion.
 * @li fusion_id: An optional integer identifying the fusion id if fusion is set.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * y: A list of dynamic output tensors. Has the same type and length as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomBroadcast)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .REQUIRED_ATTR(root_rank, Int)
    .REQUIRED_ATTR(group, String)
    .ATTR(fusion, Int, 0)
    .ATTR(fusion_id, Int, -1)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomBroadcast)
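/*
 * Usage sketch (illustrative, not part of this header): HcomBroadcast takes a
 * dynamic list of inputs, so the instance count is created first and the
 * tensors are then wired by index. The create_dynamic_input_x /
 * set_dynamic_input_x / create_dynamic_output_y setters are assumed to be
 * generated from the DYNAMIC_INPUT/DYNAMIC_OUTPUT registration above.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Graph BuildBroadcastGraph() {
 *     ge::Graph graph("broadcast_graph");
 *     auto var0 = ge::op::Data("var0").set_attr_index(0);
 *     auto var1 = ge::op::Data("var1").set_attr_index(1);
 *     auto broadcast = ge::op::HcomBroadcast("broadcast")
 *                          .set_attr_root_rank(0)               // rank whose data is sent
 *                          .set_attr_group("hccl_world_group");
 *     broadcast.create_dynamic_input_x(2);                      // two tensors to broadcast
 *     broadcast.set_dynamic_input_x(0, var0);
 *     broadcast.set_dynamic_input_x(1, var1);
 *     broadcast.create_dynamic_output_y(2);                     // matching dynamic outputs
 *     std::vector<ge::Operator> inputs{var0, var1};
 *     std::vector<ge::Operator> outputs{broadcast};
 *     graph.SetInputs(inputs).SetOutputs(outputs);
 *     return graph;
 *   }
 */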
/**
 * @brief Performs reduction across all input tensors, scattering the result in
 * equal blocks among ranks, with each rank getting a chunk of data based on its
 * rank index.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32, float16,
 * float32.
 * @par Attributes:
 * @li reduction: A required string identifying the reduction operation to
 * perform. The supported operations are: "sum", "max", "min", "prod".
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li rank_size: A required integer identifying the number of ranks
 * participating in the op.
 * @par Outputs:
 * y: A Tensor. Has the same type as "x".
 * @attention Constraints:
 * "group" is limited to 128 characters. Use "hccl_world_group"
 * as the name of a world group.
 */
REG_OP(HcomReduceScatter)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16}))
    .REQUIRED_ATTR(reduction, String)
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(rank_size, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReduceScatter)
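/*
 * Usage sketch (illustrative, not part of this header): a reduce-scatter node.
 * Since the reduced result is scattered in equal blocks, each rank keeps one
 * block, e.g. an input of shape [8, 16] with rank_size = 4 would yield an
 * output of shape [2, 16] per rank. Setter names are assumptions based on the
 * registration above.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Graph BuildReduceScatterGraph() {
 *     ge::Graph graph("reduce_scatter_graph");
 *     auto data = ge::op::Data("data").set_attr_index(0);
 *     auto reduce_scatter = ge::op::HcomReduceScatter("reduce_scatter")
 *                               .set_input_x(data)
 *                               .set_attr_reduction("sum")
 *                               .set_attr_group("hccl_world_group")
 *                               .set_attr_rank_size(4);
 *     std::vector<ge::Operator> inputs{data};
 *     std::vector<ge::Operator> outputs{reduce_scatter};
 *     graph.SetInputs(inputs).SetOutputs(outputs);
 *     return graph;
 *   }
 */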
/**
 * @brief Sends the input tensor to the destination rank.
 * @par Inputs:
 * x: A tensor. Must be one of the following types: int8, int16, int32, int64,
 * uint64, float16, float32.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be received by the HcomReceive op with the same "sr_tag".
 * @li dest_rank: A required integer identifying the destination rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @par Outputs:
 * None.
 * @attention Constraints:
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li Operators HcomSend and HcomReceive must use the same "sr_tag".
 * @see HcomReceive
 */
REG_OP(HcomSend)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(dest_rank, Int)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomSend)
/**
 * @brief Receives the tensor from the source rank.
 * @par Inputs:
 * None.
 * @par Attributes:
 * @li sr_tag: A required integer identifying the send/recv message tag. The
 * message will be sent by the HcomSend op with the same "sr_tag".
 * @li src_rank: A required integer identifying the source rank.
 * @li group: A required string identifying the group name of ranks
 * participating in the op.
 * @li shape: A required list identifying the shape of the tensor to be
 * received.
 * @li dtype: A required integer identifying the type of the tensor to be
 * received. The supported types are: int8, int16, int32, int64, uint64,
 * float16, float32.
 * @par Outputs:
 * y: A tensor with the type identified in "dtype".
 * @attention Constraints:
 * @li "group" is limited to 128 characters. Use
 * "hccl_world_group" as the name of a world group.
 * @li Operators HcomSend and HcomReceive must use the same "sr_tag".
 * @li "shape" should be the same as that of the input tensor of HcomSend.
 * @li "dtype" should be the same as that of the input tensor of HcomSend.
 * @see HcomSend
 */
REG_OP(HcomReceive)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT8, DT_INT16, DT_FLOAT16, DT_INT64, DT_UINT64}))
    .REQUIRED_ATTR(group, String)
    .REQUIRED_ATTR(sr_tag, Int)
    .REQUIRED_ATTR(src_rank, Int)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(HcomReceive)
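/*
 * Usage sketch (illustrative, not part of this header): a point-to-point pair.
 * The sending rank builds a graph with HcomSend and the receiving rank a graph
 * with HcomReceive; the two ops are matched by "sr_tag", and "shape"/"dtype" on
 * the receiver must describe the tensor being sent. Setter names follow the
 * REG_OP code-generation convention and are assumptions.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   // On the sending rank (e.g. rank 0):
 *   ge::Graph BuildSendGraph() {
 *     ge::Graph graph("send_graph");
 *     auto data = ge::op::Data("data").set_attr_index(0);
 *     auto send = ge::op::HcomSend("send")
 *                     .set_input_x(data)
 *                     .set_attr_group("hccl_world_group")
 *                     .set_attr_sr_tag(100)          // must match the receiver
 *                     .set_attr_dest_rank(1);
 *     std::vector<ge::Operator> inputs{data};
 *     graph.SetInputs(inputs);
 *     return graph;
 *   }
 *
 *   // On the receiving rank (e.g. rank 1):
 *   ge::Graph BuildReceiveGraph() {
 *     ge::Graph graph("receive_graph");
 *     auto recv = ge::op::HcomReceive("receive")
 *                     .set_attr_group("hccl_world_group")
 *                     .set_attr_sr_tag(100)          // same tag as HcomSend
 *                     .set_attr_src_rank(0)
 *                     .set_attr_shape({2, 16})       // shape of the tensor sent by rank 0
 *                     .set_attr_dtype(ge::DT_FLOAT); // dtype of the tensor sent by rank 0
 *     std::vector<ge::Operator> outputs{recv};
 *     graph.SetOutputs(outputs);
 *     return graph;
 *   }
 */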
/**
 * @brief Performs a remote read of input tensors.
 * @par Inputs:
 * remote: A tensor describing the remote memory addresses to read, as triples
 * of u64 values: remoteId, addrRemote, length (in bytes).
 * @par Outputs:
 * local: A tensor holding the data read; its number of elements is
 * length / size_of(dtype).
 */
REG_OP(HcomRemoteRead)
    .INPUT(remote, TensorType({DT_INT64, DT_UINT64}))
    .OUTPUT(local, TensorType::ALL())
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(HcomRemoteRead)

/**
 * @brief Performs a remote read into a referenced cache variable.
 * @par Inputs:
 * @li remote: A uint64 tensor describing the remote memory addresses to read.
 * @li cache_var: A uint64 tensor identifying the local cache variable.
 * @li local_offset: A uint64 tensor of local offsets.
 * @par Outputs:
 * cache_var: The updated cache variable reference.
 */
REG_OP(HcomRemoteRefRead)
    .INPUT(remote, TensorType({DT_UINT64}))
    .INPUT(cache_var, TensorType({DT_UINT64}))
    .INPUT(local_offset, TensorType({DT_UINT64}))
    .OUTPUT(cache_var, TensorType({DT_UINT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(HcomRemoteRefRead)
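/*
 * Usage sketch (illustrative, not part of this header): the "remote" input of
 * HcomRemoteRead is a uint64 tensor of (remoteId, addrRemote, length) triples.
 * Below, one triple is packed into a Const op and wired into the read; all
 * concrete values and the setter names are assumptions.
 *
 *   #include <vector>
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Graph BuildRemoteReadGraph(uint64_t remote_id, uint64_t remote_addr, uint64_t len_bytes) {
 *     ge::Graph graph("remote_read_graph");
 *     uint64_t triple[3] = {remote_id, remote_addr, len_bytes};
 *     ge::TensorDesc desc(ge::Shape({1, 3}), ge::FORMAT_ND, ge::DT_UINT64);
 *     ge::Tensor remote_desc(desc);
 *     remote_desc.SetData(reinterpret_cast<uint8_t *>(triple), sizeof(triple));
 *     auto remote = ge::op::Const("remote").set_attr_value(remote_desc);
 *     auto read = ge::op::HcomRemoteRead("remote_read")
 *                     .set_input_remote(remote)
 *                     .set_attr_dtype(ge::DT_FLOAT);  // "local" holds len_bytes / sizeof(float) elements
 *     std::vector<ge::Operator> outputs{read};
 *     graph.SetOutputs(outputs);
 *     return graph;
 *   }
 */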
/**
 * @brief Performs a remote write of input tensors.
 * @par Inputs:
 * @li remote: A tensor describing the remote memory addresses to write, as
 * triples of u64 values: remoteId, addrRemote, length (in bytes).
 * @li local: A tensor holding the data to write; its number of elements is
 * length / size_of(Type).
 */
REG_OP(HcomRemoteWrite)
    .INPUT(remote, TensorType({DT_INT64, DT_UINT64}))
    .INPUT(local, TensorType::ALL())
    .OP_END_FACTORY_REG(HcomRemoteWrite)

/**
 * @brief Performs a scattered remote write of input tensors.
 * @par Inputs:
 * @li remote: A tensor describing the remote memory addresses to write.
 * @li local: A tensor holding the data to write.
 * @li local_offset: An optional uint64 tensor of local offsets.
 */
REG_OP(HcomRemoteScatterWrite)
    .INPUT(remote, TensorType({DT_INT64, DT_UINT64}))
    .INPUT(local, TensorType::ALL())
    .OPTIONAL_INPUT(local_offset, TensorType({DT_UINT64}))
    .OP_END_FACTORY_REG(HcomRemoteScatterWrite)
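/*
 * Usage sketch (illustrative, not part of this header): HcomRemoteWrite pairs a
 * remote address descriptor with a local tensor holding the data to write. The
 * descriptor has the same (remoteId, addrRemote, length) layout as in the
 * HcomRemoteRead sketch above; setter names are assumptions.
 *
 *   #include "graph/graph.h"
 *   #include "all_ops.h"
 *
 *   ge::Operator BuildRemoteWrite(const ge::Operator &remote_desc, const ge::Operator &local_data) {
 *     return ge::op::HcomRemoteWrite("remote_write")
 *                .set_input_remote(remote_desc)  // uint64 (remoteId, addrRemote, length) triples
 *                .set_input_local(local_data);   // data to write, length / size_of(type) elements
 *   }
 */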
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_HCOM_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.
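As a rough illustration of the GE API side of that architecture, the sketch below shows a typical call sequence an application could use to hand a graph (for example, one containing the HCCL ops declared in hcom_ops.h) to GE for compilation and execution. It assumes the std::string-keyed GEInitialize/Session interfaces from ge_api.h; the option map contents, the graph id, and the BuildAllReduceGraph helper (from the HcomAllReduce sketch above) are placeholders.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

ge::Graph BuildAllReduceGraph();  // e.g. the HcomAllReduce sketch above (placeholder)

int RunAllReduceOnce() {
  std::map<std::string, std::string> options;  // engine/session options (placeholders)
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  ge::Session session(options);
  ge::Graph graph = BuildAllReduceGraph();
  session.AddGraph(1, graph);                  // register the graph under id 1
  std::vector<ge::Tensor> inputs;              // feed tensors for the graph's Data ops
  std::vector<ge::Tensor> outputs;
  ge::Status ret = session.RunGraph(1, inputs, outputs);
  ge::GEFinalize();
  return ret == ge::SUCCESS ? 0 : -1;
}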