You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they may include dashes ('-') and can be up to 35 characters long.

split_combination_ops.h 16 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file split_combination_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_SPLIT_COMBINATION_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_SPLIT_COMBINATION_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
* @brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors. \n
* @par Inputs:
* Two inputs, including:
* @li x: An ND Tensor.
* Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16,
* uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
* @li split_dim: Must be of type int32. Specifies the dimension along which to split. \n
* @par Attributes:
* num_split: A required int32. Specifies the number of output tensors. No default value. \n
* @par Outputs:
* y: Dynamic output. A list of output tensors. Has the same type and format as "x". \n
* @attention Constraints:
* @li "num_split" is greater than or equal to 1.
* @li The size of dimension "split_dim" is evenly divisible by "num_split".
* @li "split_dim" is in the range [-len(x.shape), len(x.shape) - 1]. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Split.
*/
REG_OP(Split)
    .INPUT(split_dim, TensorType({DT_INT32}))
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(Split)
/**
* @brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors. \n
* @par Inputs:
* One input:
* x: An ND Tensor.
* Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8,
* uint16, uint32, uint64.
* @par Attributes:
* @li split_dim: A required int32. Specifies the dimension along which to split. No default value.
* @li num_split: A required int32. Specifies the number of output tensors. No default value. \n
* @par Outputs:
* y: Dynamic output. A list of output tensors. Has the same type and format as "x". \n
* @attention Constraints:
* @li "num_split" is greater than or equal to 1.
* @li The size of dimension "split_dim" is evenly divisible by "num_split".
* @li "split_dim" is in the range [-len(x.shape), len(x.shape) - 1]. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Split.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Split instead.
*/
REG_OP(SplitD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                   DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitD)
/**
* @brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors
* according to "size_splits". \n
* @par Inputs:
* Three inputs, including:
* @li x: An ND Tensor.
* Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16,
* uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
* @li size_splits: Must be one of the following types: int32, int64. Specifies a list containing
* the size of each output tensor along the split dimension.
* @li split_dim: Must be of type int32. Specifies the dimension along which to split. \n
* @par Attributes:
* num_split: A required int32. Specifies the number of output tensors. No default value. \n
* @par Outputs:
* y: Dynamic output. A list of output tensors. Has the same type and format as "x". \n
* @attention Constraints:
* @li Each element in "size_splits" is greater than or equal to 1.
* @li "size_splits" and "num_split" have the same length.
* @li The elements in "size_splits" sum to the size of dimension "split_dim". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SplitV.
*/
REG_OP(SplitV)
    .INPUT(x, TensorType::BasicType())
    .INPUT(size_splits, TensorType::IndexNumberType())
    .INPUT(split_dim, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitV)
/**
* @brief Splits a tensor along dimension "split_dim" into "num_split" smaller tensors
* according to "size_splits". \n
* @par Inputs:
* One input:
* x: An ND Tensor.
* Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8,
* uint16, uint32, uint64.
* @par Attributes:
* @li size_splits: A required list of int32. Specifies a list containing the size of each
* output tensor along the split dimension.
* @li split_dim: A required int32. Specifies the dimension along which to split. No default value.
* @li num_split: A required int32. Specifies the number of output tensors. No default value. \n
* @par Outputs:
* y: Dynamic output. A list of output tensors. Has the same type and format as "x". \n
* @attention Constraints:
* @li Each element in "size_splits" is greater than or equal to 1.
* @li "size_splits" and "num_split" have the same length.
* Under the Caffe framework, the "slice_point" cut points are converted to segment sizes
* and mapped to "size_splits".
* @li The elements in "size_splits" sum to the size of dimension "split_dim".
* Under the Caffe framework, "slice_dim" or "axis" is mapped to "split_dim"; only one of
* them can take effect.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SplitV.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SplitV instead.
*/
REG_OP(SplitVD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                                   DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(size_splits, ListInt)
    .REQUIRED_ATTR(split_dim, Int)
    .REQUIRED_ATTR(num_split, Int)
    .OP_END_FACTORY_REG(SplitVD)
/**
* @brief Concatenates a list of N tensors along the first dimension.
* @par Inputs:
* One input, including:
* values: A list of Tensors. Must be one of the following types: int8, int16, int32,
* int64, uint8, uint16, uint32, uint64, float16, float32.
* Tensors to be concatenated. All must have size 1 in the first dimension and the same shape.
* It's a dynamic input. \n
* @par Attributes:
* @li shape: A required list of ints.
* @li N: A required int. The number of tensors in the dynamic input "values". \n
* @par Outputs:
* output_data: The concatenated tensor with the same type as "values".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ParallelConcat.
*/
REG_OP(ParallelConcat)
    .DYNAMIC_INPUT(values, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .OUTPUT(output_data, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ParallelConcat)
/**
* @brief Concatenates tensors along one dimension. \n
* @par Inputs:
* One input:
* x: Dynamic input. An NC1HWC0 or ND Tensor.
* Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8,
* uint16, uint32, uint64.
* @par Attributes:
* concat_dim: A required int. Specifies the dimension along which to concatenate.
* No default value.
* N: An optional int. Specifies the number of elements in "x". Defaults to "1".
* @par Outputs:
* y: A Tensor. Has the same type and format as "x". \n
* @attention Constraints:
* @li "x" is a list of at least 2 "tensor" objects of the same type.
* @li "concat_dim" is in the range [-len(x.shape), len(x.shape)]. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ConcatV2.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ConcatV2 instead.
*/
REG_OP(ConcatV2D)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_INT64, DT_UINT64, DT_UINT32, DT_INT16, DT_UINT16, DT_UINT8}))
    .REQUIRED_ATTR(concat_dim, Int)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatV2D)
/**
* @brief Concatenates tensors along one dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: Dynamic input. An NC1HWC0 or ND Tensor.
* Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8,
* uint16, uint32, uint64.
* @li concat_dim: An int32 or int64. Specifies the dimension along which to concatenate. \n
* @par Attributes:
* N: An optional int. Specifies the number of elements in "x". Defaults to "1". \n
* @par Outputs:
* y: A Tensor. Has the same type and format as "x". \n
* @attention Constraints:
* "x" is a list of at least 2 "tensor" objects of the same type. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ConcatV2.
*/
REG_OP(ConcatV2)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .INPUT(concat_dim, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatV2)
/**
* @brief Concatenates tensors along one dimension. \n
* @par Inputs:
* One input:
* x: Dynamic input. An NC1HWC0 or ND Tensor.
* Must be one of the following types: float16, float32, int32, int8, int16, int64, uint8,
* uint16, uint32, uint64.
* @par Attributes:
* @li concat_dim: A required int. Specifies the dimension along which to concatenate.
* No default value.
* @li N: An optional int. Specifies the number of elements in "x". Defaults to "1". \n
* @par Outputs:
* y: A Tensor. Has the same type and format as "x". \n
* @attention Constraints:
* @li "x" is a list of at least 2 "tensor" objects of the same type.
* @li "concat_dim" is in the range [-len(x.shape), len(x.shape)]. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Concat.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Concat instead.
*/
REG_OP(ConcatD)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT,DT_FLOAT16,DT_INT8,DT_INT16,DT_INT32,DT_INT64,DT_UINT8,DT_UINT16,DT_UINT32,DT_UINT64}))
    .REQUIRED_ATTR(concat_dim, Int)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ConcatD)
/**
* @brief Concatenates tensors along one dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: Dynamic input. An NC1HWC0 or ND Tensor.
* Must be one of the following types: float16, float32, double, int32,
* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16,
* complex128, uint32, uint64, qint16, quint16.
* @li concat_dim: An int32 or int64. Specifies the dimension along which to concatenate. \n
* @par Attributes:
* N: An optional int. Specifies the number of elements in "x". Defaults to "1". \n
* @par Outputs:
* y: A Tensor. Has the same type and format as "x". \n
* @attention Constraints:
* @li "x" is a list of at least 2 "tensor" objects of the same type.
* @li "concat_dim" is in the range [-len(x.shape), len(x.shape)]. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Concat.
*/
REG_OP(Concat)
    .INPUT(concat_dim, TensorType::IndexNumberType())
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(Concat)
/**
* @brief Packs the list of tensors in "x" into a tensor with rank one higher than each tensor
* in the list, by packing them along the "axis" dimension. Given a list of length N of tensors
* of shape (A, B, C): if axis == 0 then the output tensor will have the shape (N, A, B, C). \n
* @par Inputs:
* x: A list of N Tensors. Must be one of the following types: int8, int16, int32,
* int64, uint8, uint16, uint32, uint64, float16, float32, bool. It's a dynamic input. \n
* @par Attributes:
* @li axis: An optional int; defaults to 0.
* Dimension along which to pack. The range is [-(R+1), R+1).
* @li N: A required int. Number of tensors. \n
* @par Outputs:
* y: A Tensor. Has the same type as "x".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Pack.
*/
REG_OP(Pack)
    .DYNAMIC_INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(axis, Int, 0)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(Pack)
/**
* @brief Computes offsets of concat inputs within its output. \n
* @par Inputs:
* Two inputs, including:
* @li concat_dim: A Tensor of type int32.
* @li x: A list of 1D Tensor objects of type int32. It's a dynamic input. \n
* @par Attributes:
* N: A required int. \n
* @par Outputs:
* y: A Tensor list with the same type as "x". It's a dynamic output. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ConcatOffset.
*/
REG_OP(ConcatOffset)
    .INPUT(concat_dim, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffset)
/**
* @brief Computes offsets of concat inputs within its output. \n
* @par Inputs:
* One input:
* x: A list of 1D Tensor objects of type int32. It's a dynamic input. \n
* @par Attributes:
* @li concat_dim: A required int. Must be within the rank of input "x".
* @li N: A required int. \n
* @par Outputs:
* y: A Tensor list with the same type as "x". It's a dynamic output. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ConcatOffset.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ConcatOffset instead.
*/
REG_OP(ConcatOffsetD)
    .DYNAMIC_INPUT(x, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(concat_dim, Int)
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(ConcatOffsetD)
/**
* @brief Computes combinations of length "r" of the given tensor. \n
* @par Inputs:
* x: A 1D Tensor object. \n
* @par Attributes:
* @li r: An optional int indicating the number of elements to combine. Defaults to 2.
* @li with_replacement: An optional bool indicating whether to allow duplication
* in combinations. Defaults to "false". \n
* @par Outputs:
* y: A Tensor with the same type as "x". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator Combinations.
*/
REG_OP(Combinations)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(r, Int, 2)
    .ATTR(with_replacement, Bool, false)
    .OP_END_FACTORY_REG(Combinations)
  335. } // namespace ge
  336. #endif // OPS_BUILT_IN_OP_PROTO_INC_SPLIT_COMBINATION_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示