You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

split_combination_ops.h 6.7 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_OP_SPLIT_COMBINATION_OPS_H
  17. #define GE_OP_SPLIT_COMBINATION_OPS_H
  18. #include "../graph/operator_reg.h"
  19. namespace ge {
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" output tensors.
* NOTE(review): semantics assumed to mirror tf.split (equal-sized slices) — confirm against the op kernel.
*@par Inputs:
* Two inputs, including:
* @li split_dim: A Tensor of type int32. The dimension along which to split.
* @li value: A Tensor of a basic type. The tensor to be split.
*@par Attributes:
*num_split: A required int. The number of output tensors.
*@par Outputs:
*output: A dynamic list of Tensors with the same type as "value".
*/
REG_OP(Split)
    .INPUT(split_dim, TensorType({DT_INT32}))
    .INPUT(value, TensorType::BasicType())
    .DYNAMIC_OUTPUT(output, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(Split)
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" output tensors.
* Variant of Split where "split_dim" is a compile-time attribute instead of an input tensor.
*@par Inputs:
*value: A Tensor. Must be one of the following types: int8, int16, int32, int64,
* uint8, uint16, uint32, uint64, float32, float16.
*@par Attributes:
*@li split_dim: A required int. The dimension along which to split.
*@li num_split: A required int. The number of output tensors.
*@par Outputs:
*output: A dynamic list of Tensors with the same type as "value".
*/
REG_OP(SplitD)
    .INPUT(value, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                              DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(output, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                        DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitD)
/**
*@brief Splits a tensor along dimension "input_split_dim" into "num_split" tensors
* whose sizes along that dimension are given by "input_size_splits".
* NOTE(review): semantics assumed to mirror tf.raw_ops.SplitV — confirm against the op kernel.
*@par Inputs:
* Three inputs, including:
* @li input_value: A Tensor of a basic type. The tensor to be split.
* @li input_size_splits: A Tensor of an index-number type. Sizes of each output along the split dimension.
* @li input_split_dim: A Tensor of type int32. The dimension along which to split.
*@par Attributes:
*num_split: A required int. The number of output tensors.
*@par Outputs:
*output_data: A dynamic list of Tensors with the same type as "input_value".
*/
REG_OP(SplitV)
    .INPUT(input_value, TensorType::BasicType())
    .INPUT(input_size_splits, TensorType::IndexNumberType())
    .INPUT(input_split_dim, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(output_data, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitV)
/**
*@brief Splits a tensor along dimension "split_dim" into "num_split" tensors whose
* sizes along that dimension are given by "size_splits".
* Variant of SplitV where the split sizes and dimension are compile-time attributes.
*@par Inputs:
*input_value: A Tensor. Must be one of the following types: int8, int16, int32, int64,
* uint8, uint16, uint32, uint64, float32, float16.
*@par Attributes:
*@li size_splits: A required list of ints. Sizes of each output along the split dimension.
*@li split_dim: A required int. The dimension along which to split.
*@li num_split: A required int. The number of output tensors.
*@par Outputs:
*output_data: A dynamic list of Tensors with the same type as "input_value".
*/
REG_OP(SplitVD)
    .INPUT(input_value, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                    DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(output_data, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                             DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(size_splits, ListInt)
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitVD)
/**
*@brief Concatenates a list of N tensors along the first dimension.
*@par Inputs:
*values: A dynamic list of Tensors to be concatenated. Must be one of the following
* types: int8, int16, int32, int64, uint8, uint16, uint32, uint64, float16, float32.
* All must have size 1 in the first dimension and the same shape.
* NOTE(review): the original doc described "shape" as a second input, but it is
* registered below only as an attribute — corrected here.
*@par Attributes:
*shape: A required list of ints. The final shape of the result; should equal the
* shape of any input but with the number of input tensors in the first dimension.
*@par Outputs:
*output_data: The concatenated tensor with same type as "values".
*/
REG_OP(ParallelConcat)
    .DYNAMIC_INPUT(values, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .OUTPUT(output_data, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(ParallelConcat)
/**
*@brief Concatenates "N" input tensors along dimension "axis".
* NOTE(review): relationship to ConcatD/ConcatV2 (same semantics, different attr name)
* inferred from the registration — confirm against the op kernel.
*@par Inputs:
*input_values: A dynamic list of Tensors to be concatenated. Must be one of the following
* types: float16, float32, int32, int8, int64, uint64, uint32, int16, uint16, uint8.
*@par Attributes:
*@li axis: A required int. The dimension along which to concatenate.
*@li N: A required int. The number of input tensors.
*@par Outputs:
*output_data: The concatenated tensor with same type as "input_values".
*/
REG_OP(ConcatExt2)
    .DYNAMIC_INPUT(input_values, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .OUTPUT(output_data, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .REQUIRED_ATTR(axis, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatExt2)
/**
*@brief Concatenates "N" input tensors along the dimension given by the "axis" input tensor.
*@par Inputs:
* Two inputs, including:
* @li input_values: A dynamic list of Tensors of a basic type to be concatenated.
* @li axis: A Tensor of an index-number type. The dimension along which to concatenate.
*@par Attributes:
*N: A required int. The number of input tensors.
*@par Outputs:
*output_data: The concatenated tensor with same type as "input_values".
*/
REG_OP(ConcatV2)
    .DYNAMIC_INPUT(input_values, TensorType::BasicType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(output_data, TensorType::BasicType())
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatV2)
/**
*@brief Concatenates "N" input tensors along dimension "concat_dim".
* Variant of Concat where "concat_dim" is a compile-time attribute instead of an input tensor.
*@par Inputs:
*input_values: A dynamic list of Tensors to be concatenated. Must be one of the following
* types: float32, float16, int8, int16, int32, int64, uint8, uint16, uint32, uint64.
*@par Attributes:
*@li concat_dim: A required int. The dimension along which to concatenate.
*@li N: A required int. The number of input tensors.
*@par Outputs:
*output_data: The concatenated tensor with same type as "input_values".
*/
REG_OP(ConcatD)
    .DYNAMIC_INPUT(input_values, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .OUTPUT(output_data, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .REQUIRED_ATTR(concat_dim, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatD)
/**
*@brief Concatenates "N" input tensors along the dimension given by the "concat_dim" input tensor.
*@par Inputs:
* Two inputs, including:
* @li input_values: A dynamic list of Tensors of a basic type to be concatenated.
* @li concat_dim: A Tensor of an index-number type. The dimension along which to concatenate.
*@par Attributes:
*N: A required int. The number of input tensors.
*@par Outputs:
*output_data: The concatenated tensor with same type as "input_values".
*/
REG_OP(Concat)
    .DYNAMIC_INPUT(input_values, TensorType::BasicType())
    .INPUT(concat_dim, TensorType::IndexNumberType())
    .OUTPUT(output_data, TensorType::BasicType())
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(Concat)
/**
*@brief Packs the list of tensors in values into a tensor with rank one higher than each tensor in
* values, by packing them along the axis dimension. Given a list of length N of tensors of
* shape (A, B, C); if axis == 0 then the output tensor will have the shape (N, A, B, C).
*@par Inputs:
* x: A dynamic list of N Tensors of a basic type.
* NOTE(review): original doc listed the concrete types (int8..uint64, float16, float32,
* bool); the registration below uses TensorType::BasicType() — confirm they match.
*@par Attributes:
*@li axis: A required int.
* Dimension along which to pack. The range is [-(R+1), R+1).
*@li N: A required int. Number of tensors.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Pack)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(axis, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(Pack)
/**
*@brief Computes offsets of concat inputs within its output.
*@par Inputs:
*Two inputs, including:
* @li concat_dim: A Tensor of type int32. The dimension along which concatenation is performed.
* @li x: A dynamic list of 1D Tensor objects of type int32.
*@par Attributes:
*N: A required int. The number of "x" inputs.
* NOTE(review): original doc listed "Concat_dim" as an attribute, but it is registered
* below as an input tensor; only "N" is an attribute — corrected here.
*@par Outputs:
*y: A dynamic Tensor list with same type as "x".
*/
REG_OP(ConcatOffset)
    .INPUT(concat_dim, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffset)
/**
*@brief Computes offsets of concat inputs within its output.
* Variant of ConcatOffset where "concat_dim" is a compile-time attribute instead of an input tensor.
*@par Inputs:
*One input:
* x: A dynamic list of 1D Tensor objects of type int32.
* NOTE(review): original doc (copied from ConcatOffset) described "concat_dim" as a
* second input, but it is registered below only as an attribute — corrected here.
*@par Attributes:
*@li concat_dim: A required int. Must be within the rank of input "x".
*@li N: A required int. The number of "x" inputs.
*@par Outputs:
*y: A dynamic Tensor list with same type as "x".
*/
REG_OP(ConcatOffsetD)
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(concat_dim, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffsetD)
  151. } // namespace ge
  152. #endif // GE_OP_SPLIT_COMBINATION_OPS_H

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示