You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

transformation_ops.h 26 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file transformation_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
*@brief Bitcasts a tensor from one type to another without copying data.
The bytes of "x" are reinterpreted as the target data type . \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: bool, float16, float32,
int8, int32, uint32, uint8, int64, uint64, int16, uint16, double, complex64,
complex128, qint8, quint8, qint16, quint16, qint32 . \n

*@par Outputs:
*y: A Tensor. Holds the same bytes as "x", reinterpreted with the data type
given by the "type" attribute . \n

*@par Attributes:
*type: A required ge::DataType. The target data type of the output . \n

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Bitcast.
*/
REG_OP(Bitcast)
    .INPUT(x, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                          DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                           DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(type, Type)
    .OP_END_FACTORY_REG(Bitcast)
/**
*@brief Converts the tensor format from HWCN to C1HWNCoC0 . \n

*@par Inputs:
*x: A Tensor. Must be a 4D Tensor of type float16, float32, int32 or uint16,
with format HWCN . \n

*@par Outputs:
*y: A 6D Tensor. Has the same type as "x", with format C1HWNCoC0.
*/
REG_OP(DepthwiseWeight4DTo6D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OP_END_FACTORY_REG(DepthwiseWeight4DTo6D)
/**
*@brief Converts the tensor format from C1HWNCoC0 to HWCN . \n

*@par Inputs:
*x: A Tensor. Must be a 6D Tensor of type float16, float32, int32 or uint16,
with format C1HWNCoC0 . \n

*@par Attributes:
*channel_size: An optional int, specifying the channel size of the 4D output
Tensor with format HWCN. Defaults to 16 . \n

*@par Outputs:
*y: A 4D Tensor. Has the same type as "x", with format HWCN.
*/
REG_OP(DepthwiseWeight6DTo4D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .ATTR(channel_size, Int, 16)
    .OP_END_FACTORY_REG(DepthwiseWeight6DTo4D)
/**
*@brief Permutes the dimensions according to "perm".
The returned tensor's dimension i corresponds to the input dimension perm[i] . \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8,
int16, int32, int64, uint8, uint16, uint32, uint64 . \n

*@par Attributes:
*perm: A required list of ints. A permutation of the dimensions of "x" . \n

*@par Outputs:
*y: A Tensor. Has the same type as "x".

*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Transpose instead.
*/
REG_OP(TransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .OP_END_FACTORY_REG(TransposeD)
/**
*@brief Permutes the dimensions according to "perm".
The returned tensor's dimension i corresponds to the input dimension perm[i] . \n

*@par Inputs:
*Two inputs, including:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8,
int16, int32, int64, uint8, uint16, uint32, uint64.
*@li perm: A Tensor of type int32 or int64. A permutation of the dimensions of "x" . \n

*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Transpose.
*/
REG_OP(Transpose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(perm, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Transpose)
/**
*@brief Performs a format transformation on the input tensor. Only the
following conversions are supported:
"NHWC"/"NCHW" to "NC1HWC0" and "NC1HWC0" to "NHWC"/"NCHW";
"NCHW" to "FRACTAL_Zn" and "FRACTAL_Zn" to "NCHW";
"HWCN" to "FRACTAL_Zn" and "FRACTAL_Zn" to "HWCN" . \n

*@par Inputs:
*src: A Tensor. All basic data types are supported . \n

*@par Attributes:
*@li src_format: A string specifying the source data format, e.g. "NHWC",
"NCHW", "FRACTAL_Zn".
*@li dst_format: A string specifying the target data format, e.g. "NC1HWC0",
"NCHW", "FRACTAL_Zn" . \n

*@par Outputs:
*dst: A Tensor. Has the same type as "src".
*/
REG_OP(TransData)
    .INPUT(src, TensorType::BasicType())
    .OUTPUT(dst, TensorType::BasicType())
    .REQUIRED_ATTR(src_format, String)
    .REQUIRED_ATTR(dst_format, String)
    .OP_END_FACTORY_REG(TransData)
/**
*@brief Permutes the dimensions according to "order".
The returned tensor's dimension i corresponds to the input dimension order[i] . \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32 . \n

*@par Attributes:
*order: An optional list of int32. A permutation of the dimensions of "x";
any axis transformation is supported. Defaults to {0} . \n

*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(Permute)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(order, ListInt, {0})
    .OP_END_FACTORY_REG(Permute)
/**
*@brief Flattens the input. Preserves axis 0 and flattens the input tensor
* along axis 1 . \n

*@par Inputs:
*One input:
*x: A multi-dimensional Tensor. Must be one of the following types:
* int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32 . \n

*@par Outputs:
*y: A 2D flattened Tensor (axis 0 preserved, remaining axes flattened into
* axis 1). Has the same type as "x" . \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Flatten.
*/
REG_OP(Flatten)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                          DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                           DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(Flatten)
/**
*@brief Permutes and crops the input tensor . \n

*@par Inputs:
* Three inputs, including:
*@li x: A 5D Tensor of type float16, int8 or uint8, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li crops: A 2D list or tuple of int32 or int64. Specifies the amount to
*crop from the start and end dimensions after permutation . \n

*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x" . \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*/
REG_OP(BatchToSpaceND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BatchToSpaceND)
/**
*@brief Permutes and crops the input tensor . \n

*@par Inputs:
* One input:
*x: A 5D Tensor of type float16, int8 or uint8, with format NC1HWC0 . \n

*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li crops: A required 2D list or tuple of int32 or int64. Specifies the
* amount to crop from the start and end dimensions after permutation . \n

*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x".

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpaceND instead.
*/
REG_OP(BatchToSpaceNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceNDD)
/**
*@brief Pads and permutes the input tensor . \n

*@par Inputs:
* Three inputs, including:
*@li x: A 5D Tensor of type float16 or float32, with format NC1HWC0.
*@li block_shape: A 1D list or tuple of int32 or int64.
*@li paddings: A 2D list or tuple of int32 or int64. Specifies the padding
*for the start and end dimensions after permutation . \n

*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x" . \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*/
REG_OP(SpaceToBatchND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(SpaceToBatchND)
/**
*@brief Pads and permutes the input tensor . \n

*@par Inputs:
* One input:
*x: A 5D Tensor of type float16 or float32, with format NC1HWC0 . \n

*@par Attributes:
*@li block_shape: A required 1D list or tuple of int32 or int64.
*@li paddings: A required 2D list or tuple of int32 or int64. Specifies the
* padding for the start and end dimensions after permutation . \n

*@par Outputs:
*y: A Tensor with format NC1HWC0. Has the same type as input "x" . \n

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatchND instead.
*/
REG_OP(SpaceToBatchNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchNDD)
/**
*@brief Outputs a copy of the input tensor where values from the "height" and
* "width" dimensions are moved to the "depth" dimension . \n

*@par Inputs:
*x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.

*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li data_format: An optional string, specifying the data format. Defaults to
* "NHWC" . \n

*@par Outputs:
*y: A Tensor. Has the same type as input "x".

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToDepth.
*/
REG_OP(SpaceToDepth)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(SpaceToDepth)
/**
*@brief Rearranges data from depth into blocks of spatial data . \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, double,
* int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16,
* quint16, uint16, complex128, uint32, uint64.

*@par Attributes:
*Two attributes, including:
* @li block_size: A required int >= 2, specifying the size of the spatial block.
* @li data_format: An optional string, specifying the data format. Defaults to "NHWC" . \n

*@par Outputs:
*y: A Tensor of the same type as "x" . \n

*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator DepthToSpace.
*/
REG_OP(DepthToSpace)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(DepthToSpace)
/**
*@brief Permutes batch data into spatial data blocks and then prunes them . \n

*@par Inputs:
*@li x: A 4D Tensor with format NHWC. Must be one of the following types:
*float16, float32.
*@li crops: A 1D list or tuple of int32 or int64 . \n

*@par Attributes:
*block_size: A required int8, int16, int32, or int64. No default value . \n

*@par Outputs:
*y: A 4D Tensor with format NHWC,
* of type float16 or float32 . \n

*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by
*(block_size * block_size).
*@li "crops" relates to a 4D shape [batch, height, width, depth], where
*height = height_pad - crop_top - crop_bottom and
*width = width_pad - crop_left - crop_right.
*@li block_size > 2
* NOTE(review): TensorFlow's BatchToSpace only requires block_size >= 2;
* confirm whether the stricter "> 2" bound here is intentional.

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*/
REG_OP(BatchToSpace)
    .INPUT(x, TensorType::BasicType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(BatchToSpace)
/**
*@brief Rearranges (permutes) batch data into spatial data blocks, and then
*crops them . \n

*@par Inputs:
* One input:
*x: A Tensor of shape [batch*block_size*block_size, height_pad/block_size,
*width_pad/block_size, depth].
*The batch size of the input tensor must be divisible by (block_size * block_size).
*Must be one of the following types: float16, float32, double, int64, int32,
*uint8, uint16, uint32, uint64, int8, int16, complex64, complex128, qint8,
*quint8, qint16, quint16, qint32 . \n

*@par Attributes:
*@li block_size: A required int of type int32 or int64.
*@li crops: A required 2D tensor of type int32 or int64, with non-negative
*integer values and shape [2, 2]. It specifies how many elements are clipped
*from the intermediate result along the spatial dimensions . \n

*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n

*@attention Constraints:
*@li The size of the first dimension of input "x" must be divisible by
*(block_size * block_size).
*@li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
*@li block_size > 2
* NOTE(review): TensorFlow's BatchToSpace only requires block_size >= 2;
* confirm whether the stricter "> 2" bound here is intentional.

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
*/
REG_OP(BatchToSpaceD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceD)
/**
*@brief Outputs a copy of the input tensor where values from the "height" and
* "width" dimensions are padded and rearranged to the "batch" dimension . \n

*@par Inputs:
* Two inputs, including:
*@li x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@li paddings: A 2D tensor of type int, specifying the padding of the input . \n

*@par Attributes:
*block_size: A required int, specifying the input block size . \n

*@par Outputs:
*y: A Tensor. Has the same type as input "x".

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*/
REG_OP(SpaceToBatch)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(SpaceToBatch)
/**
*@brief Outputs a copy of the input tensor where values from the "height" and
*"width" dimensions are padded and rearranged to the "batch" dimension . \n

*@par Inputs:
*x: An NHWC Tensor. Must be one of the following types: float16, float32,
*double, int64, int32, uint8, uint16, uint32, uint64, int8, int16, complex64,
*complex128, qint8, quint8, qint16, quint16, qint32.

*@par Attributes:
*@li block_size: A required int, specifying the input block size.
*@li paddings: A required 2D list of ints, specifying the padding of the
*input . \n

*@par Outputs:
*y: A Tensor. Has the same type as input "x".

*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatch instead.
*/
REG_OP(SpaceToBatchD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchD)
/**
* @brief Unpacks the given dimension of a rank-R Tensor "x" into rank-(R-1)
* tensors . \n

* @par Inputs:
* x: A rank-R tensor (R > 0) of type BasicType, with format ND or NC1HWC0 . \n

* @par Attributes:
* @li num: A required int, specifying the number of tensors to be unpacked to.
* @li axis: An optional int, specifying the axis to unpack along. The value
* range is [-R, R). Defaults to 0 . \n

* @par Outputs:
* y: Dynamic output. The list of Tensor objects unpacked from "x", of type
* BasicType . \n

* @attention Constraints:
* @li "num" must match the size of dimension "axis" of "x".
* @li For the ND format, "axis" is in the range [-R, R); for the NC1HWC0
* format, "axis" must not be 2, 3, -2, or -3 . \n

* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Unpack.
*/
REG_OP(Unpack)
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num, Int)
    .ATTR(axis, Int, 0)
    .OP_END_FACTORY_REG(Unpack)
/**
* @brief Extracts "patches" from "images" and stacks them in the "depth"
* dimension of the output . \n

* @par Inputs:
* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth]. Must be one of
* the following types: float32, double, int32, uint8, int16, int8, int64,
* uint16, float16, uint32, uint64.

* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for
* each dimension of "x".
* @li strides: A required list or tuple. How far the centers of two
* consecutive patches are in the images. Must be: [1, stride_rows, stride_cols, 1].
* @li rates: A required list or tuple. Must be: [1, rate_rows, rate_cols, 1].
* This is the input stride, specifying how far two consecutive patch
* samples are in the input. Equivalent to extracting patches
* with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *
* (rates - 1), followed by subsampling them spatially by a factor of rates.
* This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
* @li padding: A required string. The type of padding algorithm to use . \n

* @par Outputs:
* y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *
* ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols
* x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"
* are the dimensions of the output patches . \n

* @attention Constraints:
* "ksizes", "strides" and "rates" are lists of integers . \n

* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractImagePatches.
*/
REG_OP(ExtractImagePatches)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractImagePatches)
/**
* @brief Extracts "patches" from the input and puts them in the "depth"
* dimension of the output . \n

* @par Inputs:
* x: A 5D Tensor with shape [batch, in_planes, in_rows, in_cols, depth] . \n

* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for
* each dimension of "x".
* @li strides: A required list or tuple. How far the centers of two
* consecutive patches are in "x". Must be:
* [1, stride_planes, stride_rows, stride_cols, 1].
* @li padding: A required string. The type of padding algorithm to use . \n

* @par Outputs:
* y: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols,
* ksize_planes * ksize_rows * ksize_cols * depth] containing patches with size
* (ksize_planes * ksize_rows * ksize_cols * depth) vectorized in the "depth"
* dimension. Note "out_planes", "out_rows" and "out_cols" are the dimensions
* of the output patches . \n

* @attention Constraints:
* "ksizes" and "strides" are lists of integers.

* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractVolumePatches.
*/
// NOTE(review): REALNUMBERTYPE() differs in casing from RealNumberType()
// used by ExtractImagePatches above — confirm both exist in TensorType.
REG_OP(ExtractVolumePatches)
    .INPUT(x, TensorType::REALNUMBERTYPE())
    .OUTPUT(y, TensorType::REALNUMBERTYPE())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractVolumePatches)
/**
*@brief Fused reshape and transpose . \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8,
*int16, int32, int64, uint8, uint16, uint32, uint64 . \n

*@par Attributes:
*@li perm: A required permutation of the dimensions of "x".
*@li shape: A required list of ints. The shape for the reshape step.
*@li transpose_first: A required bool. If true, the transpose is applied
*before the reshape; otherwise the reshape is applied first . \n

*@par Outputs:
*y: A Tensor. Has the same type as "x".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ConfusionTranspose instead.
*/
REG_OP(ConfusionTransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTransposeD)
/**
*@brief Fused reshape and transpose . \n

*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8,
*int16, int32, int64, uint8, uint16, uint32, uint64.
*@li shape: A Tensor of type int32 or int64. The shape for the reshape step . \n

*@par Attributes:
*@li perm: A required permutation of the dimensions of "x".
*@li transpose_first: A required bool. If true, the transpose is applied
*before the reshape; otherwise the reshape is applied first . \n

*@par Outputs:
*y: A Tensor. Has the same type as "x".

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ConfusionTranspose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTranspose)
/**
*@brief Flattens a contiguous range of axes of the input tensor . \n

*@par Inputs:
*x: An ND tensor. Must be one of the following types: float16, float32, int8,
*uint8, int16, uint16, int32, uint32, int64, uint64 . \n

*@par Attributes:
*@li axis: An optional int32, specifying the first axis to flatten. All
*preceding axes are retained in the output. Defaults to 1.
*@li end_axis: An optional int32, specifying the last axis to flatten. All
*following axes are retained in the output. Defaults to -1 . \n

*@par Outputs:
*y: The flattened ND tensor. Has the same type as "x" . \n

*@attention Constraints:
* "axis" and "end_axis" must be within the dimension range of the input. This
* operator cannot be directly called by the aclopExecute API.

*@par Third-party framework compatibility
* Compatible with the Caffe operator Flatten.
*/
REG_OP(FlattenV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                          DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                           DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .ATTR(axis, Int, 1)
    .ATTR(end_axis, Int, -1)
    .OP_END_FACTORY_REG(FlattenV2)
/**
*@brief Compresses a large weight into a smaller one. Usually inserted before Conv2D.
*
*@par Inputs:
*weight: A tensor before compression. Must be one of the following types:
*int8, float16.
*
*@par Outputs:
*@li weight_compress: A tensor after compression. Must be one of the following
*types: int8, float16.
*@li compress_index: A tensor of type int8.
*
*@par Attributes:
*compress_parameters: A required list of ints, specifying the compressing block.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(Compress)
    .INPUT(weight, TensorType({DT_INT8, DT_FLOAT16}))
    .OUTPUT(weight_compress, TensorType({DT_INT8, DT_FLOAT16}))
    .OUTPUT(compress_index, TensorType({DT_INT8}))
    .REQUIRED_ATTR(compress_parameters, ListInt)
    .OP_END_FACTORY_REG(Compress)
/**
*@brief Compresses a large weight into a smaller one. Usually inserted before
*FullyConnection.
*
*@par Inputs:
*weight: A tensor of type int8, holding the weight before compression.
*
*@par Outputs:
*@li weight_compress: A tensor of type int8, holding the weight after compression.
*@li compress_index: A tensor of type int8.
*
*@par Attributes:
*compress_parameters: A required list of ints, specifying the compressing block.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(CompressFcOp)
    .INPUT(weight, TensorType({DT_INT8}))
    .OUTPUT(weight_compress, TensorType({DT_INT8}))
    .OUTPUT(compress_index, TensorType({DT_INT8}))
    .REQUIRED_ATTR(compress_parameters, ListInt)
    .OP_END_FACTORY_REG(CompressFcOp)
  591. } // namespace ge
  592. #endif // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示