You can not select more than 25 topics Topics must start with a chinese character,a letter or number, can include dashes ('-') and can be up to 35 characters long.

transformation_ops.h 38 kB

5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file transformation_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
* @brief Bitcasts a tensor from one data type to another; the output data type
* is given by the "type" attribute, and the shape may change accordingly
* (reinterpretation, not a value-converting cast).
* @par Inputs:
* x: A Tensor. Must be one of the registered types below (bool, float16,
* float, integer, complex and quantized types).
* NOTE(review): the original text described queue inputs ("enqueued tensors"),
* which looks like a copy-paste error — the registration takes a single tensor. \n
* @par Attributes:
* type: A required ge::DataType. The target data type of the output. \n
* @par Outputs:
* y: A Tensor whose data type is "type". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Bitcast.
* (The original comment said "QueueIsClosed", which appears to be a
* copy-paste error.)
*/
REG_OP(Bitcast)
    .INPUT(x, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                          DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                          DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .OUTPUT(y, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                           DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE, DT_COMPLEX64,
                           DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32}))
    .REQUIRED_ATTR(type, Type)
    .OP_END_FACTORY_REG(Bitcast)
/**
* @brief Converts a tensor from format HWCN to format C1HWNCoC0. \n
* @par Inputs:
* x: A 4D Tensor of type float16, float32, int32 or uint16, with format HWCN. \n
* @par Outputs:
* y: A 6D Tensor. Has the same type as "x", with format C1HWNCoC0. \n
* @attention Constraints:
* THIS OPERATOR IS DEPRECATED. It will be removed in a future version.
*/
REG_OP(DepthwiseWeight4DTo6D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OP_END_FACTORY_REG(DepthwiseWeight4DTo6D)
/**
* @brief Converts a tensor from format C1HWNCoC0 back to format HWCN. \n
* @par Inputs:
* x: A 6D Tensor of type float16, float32, int32 or uint16, with format C1HWNCoC0. \n
* @par Attributes:
* channel_size: An optional int specifying the channel size of the 4D output
* Tensor with format HWCN. Defaults to 16. \n
* @par Outputs:
* y: A 4D Tensor. Has the same type as "x", with format HWCN. \n
* @attention Constraints:
* THIS OPERATOR IS DEPRECATED. It will be removed in a future version.
*/
REG_OP(DepthwiseWeight6DTo4D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT16}))
    .ATTR(channel_size, Int, 16)
    .OP_END_FACTORY_REG(DepthwiseWeight6DTo4D)
/**
* @brief Permutes the dimensions of "x" according to "perm": output
* dimension i corresponds to input dimension perm[i]. \n
* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float32, int8,
* int16, int32, int64, uint8, uint16, uint32, uint64. \n
* @par Attributes:
* perm: A required list of ints. A permutation of the dimensions of "x". \n
* @par Outputs:
* y: A Tensor. Has the same type as "x".
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Transpose instead
* (Transpose takes "perm" as a tensor input rather than an attribute).
*/
REG_OP(TransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .OP_END_FACTORY_REG(TransposeD)
/**
* @brief Permutes the dimensions of "x" according to "perm": output
* dimension i corresponds to input dimension perm[i]. \n
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor of any basic type (see TensorType::BasicType()).
* @li perm: A Tensor of type int32 or int64. A permutation of the dimensions
* of "x". \n
* @par Outputs:
* y: A Tensor. Has the same type as "x". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Transpose.
*/
REG_OP(Transpose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(perm, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Transpose)
/**
* @brief Performs format transformation between various data formats.
* In general, the framework inserts this operator automatically. \n
* @par Inputs:
* src: A Tensor. For all branches the type can be: float16, float32, int32,
* int8, bool. For branches without padding the type can also be: int16,
* int64, uint8, uint16, uint32, uint64. \n
* @par Attributes:
* @li src_format: A required string, the source data format, e.g. "NHWC",
* "NCHW".
* @li dst_format: A required string, the target data format, e.g. "NCHW".
* @li src_subformat: An optional int32, the source sub-format. Defaults to 0.
* @li dst_subformat: An optional int32, the target sub-format. Defaults to 0.
* @li groups: An optional int32. Defaults to 1. \n
* @par Outputs:
* dst: A Tensor. Has the same type as "src".
*/
REG_OP(TransData)
    .INPUT(src, TensorType::BasicType())
    .OUTPUT(dst, TensorType::BasicType())
    .REQUIRED_ATTR(src_format, String)
    .REQUIRED_ATTR(dst_format, String)
    .ATTR(src_subformat, Int, 0)
    .ATTR(dst_subformat, Int, 0)
    .ATTR(groups, Int, 1)
    .OP_END_FACTORY_REG(TransData)
/**
* @brief Performs format transformation for RNN weights/bias. Only supports
* "ND" to "ND_RNN_BIAS" and "ND" to "FRACTAL_ZN_RNN".
* @par Inputs:
* src: A Tensor. For all branches the type can be: float16, float32, int32,
* int8, bool. For branches without padding the type can also be: int16,
* int64, uint8, uint16, uint32, uint64. \n
* @par Attributes:
* @li src_format: A required string, the source data format: "ND",
* "ND_RNN_BIAS", "FRACTAL_ZN_RNN", etc.
* @li dst_format: A required string, the target data format: "ND",
* "ND_RNN_BIAS", "FRACTAL_ZN_RNN", etc.
* @li input_size: A required int32. (Original comment said "mental",
* presumably a typo — the attribute is required.)
* @li hidden_size: A required int32.
* @par Outputs:
* dst: A Tensor. Has the same type as "src".
*/
REG_OP(TransDataRNN)
    .INPUT(src, TensorType::BasicType())
    .OUTPUT(dst, TensorType::BasicType())
    .REQUIRED_ATTR(src_format, String)
    .REQUIRED_ATTR(dst_format, String)
    .REQUIRED_ATTR(input_size, Int)
    .REQUIRED_ATTR(hidden_size, Int)
    .OP_END_FACTORY_REG(TransDataRNN)
/**
* @brief Permutes the dimensions of "x" according to "order": output
* dimension i corresponds to input dimension order[i]. \n
* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float32. \n
* @par Attributes:
* order: An optional list of int32, a permutation of the dimensions of "x".
* Any axis transformation is supported. Defaults to {0}.
* @par Outputs:
* y: A Tensor. Has the same type as "x".
*/
REG_OP(Permute)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(order, ListInt, {0})
    .OP_END_FACTORY_REG(Permute)
/**
* @brief Flattens the input tensor into a 2D matrix. If the input tensor has
* shape (d_0, d_1, ..., d_n), the output has shape
* (d_0 x d_1 ... x d_(axis-1), d_axis x d_(axis+1) ... x d_n). \n
* @par Inputs:
* One input:
* x: A multi-dimensional Tensor. Must be one of the following types:
* int8, uint8, int16, uint16, int32, uint32, int64, uint64, float16, float32.
* @par Outputs:
* y: A 2D flattened Tensor with the contents of the input tensor. Input
* dimensions up to "axis" are flattened into the outer dimension of the
* output; the remaining dimensions are flattened into the inner dimension.
* Has one of the same data types as "x".
* @par Attributes:
* axis: An optional int32. Defaults to 1. Indicates up to which input
* dimension (exclusive) to flatten into the outer output dimension. Must be
* in the range [-r, r], where r is the rank of the input tensor; a negative
* value counts dimensions from the back. When axis = 0, the output shape is
* (1, d_0 x d_1 ... x d_n) for an input of shape (d_0, d_1, ... d_n).
* @par Third-party framework compatibility
* Compatible with the TensorFlow / ONNX operator Flatten.
*/
REG_OP(Flatten)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                          DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64,
                           DT_FLOAT, DT_FLOAT16}))
    .ATTR(axis, Int, 1)
    .OP_END_FACTORY_REG(Flatten)
/**
* @brief Permutes (batch dimension to spatial blocks) and crops the input
* tensor. \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor. The original comment documents a 5D Tensor of type
* float16, int8 or uint8; the registration accepts any basic type.
* @li block_shape: A 1D Tensor of int32 or int64.
* @li crops: A 2D Tensor of int32 or int64. Specifies the amount to crop
* from the start and end dimensions after the permutation. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*/
REG_OP(BatchToSpaceND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BatchToSpaceND)
/**
* @brief Permutes (batch dimension to spatial blocks) and crops the input
* tensor. Constant-attribute variant of BatchToSpaceND. \n
* @par Inputs:
* One input:
* x: A Tensor. The original comment documents a 5D Tensor of type float16,
* int8 or uint8; the registration accepts any basic type. \n
* @par Attributes:
* @li block_shape: A required 1D list or tuple of int32 or int64.
* @li crops: A required 2D list or tuple of int32 or int64. Specifies the
* amount to crop from the start and end dimensions after the permutation. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpaceND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpaceND instead.
*/
REG_OP(BatchToSpaceNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceNDD)
/**
* @brief Pads and permutes (spatial blocks to batch dimension) the input
* tensor. \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor. The original comment documents a 5D Tensor of type
* float16 or float32; the registration accepts any basic type.
* @li block_shape: A 1D Tensor of int32 or int64.
* @li paddings: A 2D Tensor of int32 or int64. Specifies the padding for the
* start and end dimensions after the permutation. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*/
REG_OP(SpaceToBatchND)
    .INPUT(x, TensorType::BasicType())
    .INPUT(block_shape, TensorType::IndexNumberType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(SpaceToBatchND)
/**
* @brief Pads and permutes (spatial blocks to batch dimension) the input
* tensor. Constant-attribute variant of SpaceToBatchND. \n
* @par Inputs:
* One input:
* x: A Tensor. The original comment documents a 5D Tensor of type float16 or
* float32; the registration accepts any basic type. \n
* @par Attributes:
* @li block_shape: A required 1D list or tuple of int32 or int64.
* @li paddings: A required 2D list or tuple of int32 or int64. Specifies the
* padding for the start and end dimensions after the permutation. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x". \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatchND.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatchND instead.
*/
REG_OP(SpaceToBatchNDD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_shape, ListInt)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchNDD)
/**
* @brief Outputs a copy of the input tensor where values from the "height"
* and "width" dimensions are moved to the "depth" dimension. \n
* @par Inputs:
* x: An NHWC Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64,
* int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
* @par Attributes:
* @li block_size: A required int, specifying the spatial block size.
* @li data_format: An optional string, specifying the data format. Defaults
* to "NHWC". \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToDepth.
*/
REG_OP(SpaceToDepth)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(SpaceToDepth)
/**
* @brief Rearranges data from the depth dimension into blocks of spatial
* data. \n
* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float32, double,
* int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32,
* qint16, quint16, uint16, complex128, uint32, uint64.
* @par Attributes:
* Three attributes, including:
* @li block_size: A required int >= 2, specifying the size of the spatial
* block.
* @li mode: An optional string, specifying the rearrangement mode. Defaults
* to "DCR".
* @li data_format: An optional string, specifying the data format. Defaults
* to "NHWC". \n
* @par Outputs:
* y: A Tensor of the same type as "x". \n
* @par Third-party framework compatibility:
* Compatible with the TensorFlow operator DepthToSpace.
*/
REG_OP(DepthToSpace)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .ATTR(mode, String, "DCR")
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(DepthToSpace)
/**
* @brief Permutes batch data into spatial data blocks and then crops them. \n
* @par Inputs:
* @li x: A 4D Tensor. The format must be set; supported formats: "NCHW",
* "NHWC".
* @li crops: A Tensor of int32 or int64. The original comment describes it
* as 1D; the constraints below describe a [2, 2]-style crop
* (crop_top/crop_bottom, crop_left/crop_right) — TODO confirm. \n
* @par Attributes:
* block_size: A required int. No default value. \n
* @par Outputs:
* y: A 4D Tensor. The original comment documents format NHWC and type
* float16 or float32; the registration accepts any basic type. \n
* @attention Constraints:
* @li The size of the first dimension of input "x" must be divisible by
* (block_size * block_size).
* @li The output has shape [batch, height, width, depth], where
* height = height_pad - crop_top - crop_bottom and
* width = width_pad - crop_left - crop_right.
* @li block_size > 2 (as stated by the original comment; the sibling
* BatchToSpaceD comment says the same — verify against the kernel).
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*/
REG_OP(BatchToSpace)
    .INPUT(x, TensorType::BasicType())
    .INPUT(crops, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(BatchToSpace)
/**
* @brief Rearranges (permutes) batch data into spatial data blocks and then
* crops them. Constant-attribute variant of BatchToSpace. \n
* @par Inputs:
* One input:
* x: A Tensor of shape [batch*block_size*block_size, height_pad/block_size,
* width_pad/block_size, depth]. The batch size of the input tensor must be
* divisible by (block_size * block_size).
* Must be one of the following types: float16, float32, double, int64,
* int32, uint8, uint16, uint32, uint64, int8, int16, complex64, complex128,
* qint8, quint8, qint16, quint16, qint32. \n
* @par Attributes:
* @li block_size: A required int (int32 or int64).
* @li crops: A required 2D list of non-negative int32 or int64 values with
* shape [2, 2]. Specifies how many elements are clipped from the
* intermediate result in the spatial dimensions. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x". \n
* @attention Constraints:
* @li The size of the first dimension of input "x" must be divisible by
* (block_size * block_size).
* @li "crops" is a 2D tensor of non-negative integers with shape (2, 2).
* @li block_size > 2
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator BatchToSpace.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BatchToSpace instead.
*/
REG_OP(BatchToSpaceD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(crops, ListInt)
    .OP_END_FACTORY_REG(BatchToSpaceD)
/**
* @brief Outputs a copy of the input tensor where values from the "height"
* and "width" dimensions are padded and rearranged into the "batch"
* dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: A 4D Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64,
* int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
* The format must be set; supported formats: "NCHW", "NHWC".
* @li paddings: A 2D Tensor of an integer type, specifying the padding. \n
* @par Attributes:
* block_size: A required int, specifying the spatial block size. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*/
REG_OP(SpaceToBatch)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .OP_END_FACTORY_REG(SpaceToBatch)
/**
* @brief Outputs a copy of the input tensor where values from the "height"
* and "width" dimensions are padded and rearranged into the "batch"
* dimension. Constant-attribute variant of SpaceToBatch. \n
* @par Inputs:
* x: An NHWC Tensor. Must be one of the following types: float16, float32,
* double, int64, int32, uint8, uint16, uint32, uint64, int8, int16,
* complex64, complex128, qint8, quint8, qint16, quint16, qint32.
* @par Attributes:
* @li block_size: A required int, specifying the spatial block size.
* @li paddings: A required 2D list of ints, specifying the padding. \n
* @par Outputs:
* y: A Tensor. Has the same type as input "x".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator SpaceToBatch.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use SpaceToBatch instead.
*/
REG_OP(SpaceToBatchD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(block_size, Int)
    .REQUIRED_ATTR(paddings, ListInt)
    .OP_END_FACTORY_REG(SpaceToBatchD)
/**
* @brief Unpacks the given dimension of a rank-R Tensor "x" into rank-(R-1)
* tensors. \n
* @par Inputs:
* x: A rank-R tensor (R > 0) of type BasicType. \n
* @par Attributes:
* @li num: A required int, the number of tensors to unpack "x" into.
* (The registration declares this attribute as required; the original
* "Defaults to None" wording was contradictory.)
* @li axis: An optional int, the axis to unpack along. The value range is
* [-R, R). Defaults to 0. \n
* @par Outputs:
* y: Dynamic output. The list of Tensor objects unpacked from "x", of type
* BasicType. \n
* @attention Constraints:
* @li If "num" is not specified, it is inferred from the shape of "x".
* @li For the ND format, "axis" is in the range [-R, R). \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Unpack.
*/
REG_OP(Unpack)
    .INPUT(x, TensorType::BasicType())
    .DYNAMIC_OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(num, Int)
    .ATTR(axis, Int, 0)
    .OP_END_FACTORY_REG(Unpack)
/**
* @brief Extracts "patches" from "images" and stacks them in the "depth"
* dimension of the output. \n
* @par Inputs:
* x: A 4D Tensor with shape [batch, in_rows, in_cols, depth]. Must be one of
* the following types: float32, double, int32, uint8, int16, int8, int64,
* uint16, float16, uint32, uint64. The input must have data_format NHWC or
* NCHW.
* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for
* each dimension of the images.
* @li strides: A required list or tuple. How far the centers of two
* consecutive patches are in the images. Must be:
* [1, stride_rows, stride_cols, 1].
* @li rates: A required list or tuple. Must be: [1, rate_rows, rate_cols, 1].
* This is the input stride, specifying how far apart two consecutive patch
* samples are in the input. Equivalent to extracting patches with
* patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1), followed
* by subsampling them spatially by a factor of rates. This is equivalent to
* "rate" in dilated (a.k.a. atrous) convolutions.
* @li padding: A required string. The type of padding algorithm to use,
* "SAME" or "VALID". \n
* @par Outputs:
* y: A 4D Tensor with shape [batch, out_rows, out_cols,
* ksize_rows * ksize_cols * depth] containing image patches of size
* ksize_rows x ksize_cols x depth vectorized in the "depth" dimension.
* "out_rows" and "out_cols" are the dimensions of the output patches. \n
* @attention Constraints:
* "ksizes", "strides" and "rates" are lists of integers. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractImagePatches.
*/
REG_OP(ExtractImagePatches)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractImagePatches)
/**
* @brief Extracts "patches" from the input and puts them in the "depth"
* dimension of the output. 3D counterpart of ExtractImagePatches. \n
* @par Inputs:
* x: A 5D Tensor with shape [batch, in_planes, in_rows, in_cols, depth]. \n
* The input must have data_format NDHWC or NCDHW. \n
* @par Attributes:
* @li ksizes: A required list or tuple. The size of the sliding window for
* each dimension of "x".
* @li strides: A required list or tuple. How far the centers of two
* consecutive patches are in "x". Must be:
* [1, stride_planes, stride_rows, stride_cols, 1].
* @li padding: A required string. The type of padding algorithm to use,
* "SAME" or "VALID". \n
* @par Outputs:
* y: A 5D Tensor with shape [batch, out_planes, out_rows, out_cols,
* ksize_planes * ksize_rows * ksize_cols * depth] containing patches of size
* (ksize_planes * ksize_rows * ksize_cols * depth) vectorized in the "depth"
* dimension. "out_planes", "out_rows" and "out_cols" are the dimensions of
* the output patches. \n
* @attention Constraints:
* "ksizes" and "strides" are lists of integers.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator ExtractVolumePatches.
*/
REG_OP(ExtractVolumePatches)
    .INPUT(x, TensorType::REALNUMBERTYPE())
    .OUTPUT(y, TensorType::REALNUMBERTYPE())
    .REQUIRED_ATTR(ksizes, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(ExtractVolumePatches)
/**
* @brief Fuses a reshape and a transpose into one operator. \n
* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float32, int8,
* int16, int32, int64, uint8, uint16, uint32, uint64. \n
* @par Attributes:
* @li perm: A required permutation of the dimensions of "x".
* @li shape: A required list of ints, the reshape target shape.
* @li transpose_first: A required bool. If true, the transpose is applied
* first; otherwise the reshape is applied first. \n
* @par Outputs:
* y: A Tensor. Has the same type as "x".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ConfusionTranspose
* instead (it takes "shape" as a tensor input rather than an attribute).
*/
REG_OP(ConfusionTransposeD)
    .INPUT(x, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_UINT32, DT_UINT64, DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTransposeD)
/**
* @brief Fuses a reshape and a transpose into one operator. \n
* @par Inputs:
* @li x: A Tensor. Must be one of the following types: float16, float32,
* int8, int16, int32, int64, uint8, uint16, uint32, uint64.
* @li shape: A Tensor of int32 or int64, the reshape target shape. \n
* @par Attributes:
* @li perm: A required permutation of the dimensions of "x".
* @li transpose_first: A required bool. If true, the transpose is applied
* first; otherwise the reshape is applied first. \n
* @par Outputs:
* y: A Tensor. Has the same type as "x".
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ConfusionTranspose)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(perm, ListInt)
    .REQUIRED_ATTR(transpose_first, Bool)
    .OP_END_FACTORY_REG(ConfusionTranspose)
  564. /**
  565. *@brief Flattens the input tensor to one-dimensional . \n
  566. *@par Inputs:
  567. *x: An ND tensor. All data types are supported . \n
  568. *@par Attributes:
  569. *@li axis: An optional int32, specifying the first axis to flatten. All preceding axes are retained in the output. Defaults to "1".
  570. *@li end_axis: An optional int32, specifying the last axis to flatten. All following axes are retained in the output. Defaults to "-1" . \n
  571. *@par Outputs:
  572. *y: The flattened ND tensor. All data types are supported . \n
  573. *@attention Constraints:
  574. * "axis" and "end_axis" must be within the dimension range of the input. This operator cannot be directly called by the acllopExecute API.
  575. *@par Third-party framework compatibility
  576. * Compatible with the Caffe operator Flatten.
  577. */
  578. REG_OP(FlattenV2)
  579. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
  580. DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
  581. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
  582. DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
  583. .ATTR(axis, Int, 1)
  584. .ATTR(end_axis, Int, -1)
  585. .OP_END_FACTORY_REG(FlattenV2)
  586. /**
  587. *@brief Compress large weight to small one. Usually inserted before Conv2d.
  588. *
  589. *@par Inputs:
  590. *weight: A tensor before compress. Must be one of the following types: DT_INT8, DT_FLOAT16
  591. *
  592. *@par Outputs:
  593. *@li weight_compress: A tensor after compress. Must be one of the following types: DT_INT8, DT_FLOAT16
  594. *@li compress_index: A tensor. Must be one of the following types: DT_INT8
  595. *
  596. *@par Attributes:
  597. *compress_parameters: A required int8, specifying the compressing block.
  598. *
  599. *@par Restrictions:
  600. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  601. */
  602. REG_OP(Compress)
  603. .INPUT(weight, TensorType({DT_INT8, DT_FLOAT16}))
  604. .OUTPUT(weight_compress, TensorType({DT_INT8, DT_FLOAT16}))
  605. .OUTPUT(compress_index, TensorType({DT_INT8}))
  606. .REQUIRED_ATTR(compress_parameters, ListInt)
  607. .OP_END_FACTORY_REG(Compress)
  608. /**
  609. *@brief Compress large weight to small one. Usually inserted before FullyConnection.
  610. *
  611. *@par Inputs:
  612. *weight: A tensor before compress. Must be one of the following types: DT_INT8, DT_FLOAT16
  613. *
  614. *@par Outputs:
  615. *@li weight_compress: A tensor after compress. Must be one of the following types: DT_INT8, DT_FLOAT16
  616. *@li compress_index: A tensor. Must be one of the following types: DT_INT8
  617. *
  618. *@par Attributes:
  619. *compress_parameters: A required int8, specifying the compressing block.
  620. *
  621. *@par Restrictions:
  622. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  623. */
  624. REG_OP(CompressFcOp)
  625. .INPUT(weight, TensorType({DT_INT8}))
  626. .OUTPUT(weight_compress, TensorType({DT_INT8}))
  627. .OUTPUT(compress_index, TensorType({DT_INT8}))
  628. .REQUIRED_ATTR(compress_parameters, ListInt)
  629. .OP_END_FACTORY_REG(CompressFcOp)
  630. /**
  631. *@brief Performs Col2im for each batch entry. \n
  632. *@par Inputs:
  633. *@li x: The Col Tensor. 4-D, shape: `(n, c, kernel_h*kernel_w, ho*wo)`.
  634. where ho/wo is do = (output_d + 2*padding_d - dilation_d*(kernel_d - 1) - 1)//stride_d + 1.
  635. *@li output_size: The img shape Tensor. 1-D, shape:`(2)`, value: (output_h, output_w). \n
  636. *@par Outputs:
  637. *y: The img Tensor. 4-D, shape: `(n, c, output_h, output_w)`. \n
  638. *@par Attributes:
  639. *@li kernel_shape: ListInt, value: `(kernel_h, kernel_w)`, the shape of kernel in convolution.
  640. *@li dilation: ListInt, value: `(dilation_h, dilation_w)`, the dilation in convolution.
  641. *@li padding: ListInt, value: `(padding_h, padding_w)`, the dilation in convolution.
  642. *@li stride: ListInt, value: `(stride_h, stride_w)`, the dilation in convolution. \n
  643. *@par Third-party framework compatibility
  644. * Compatible with Pytorch col2im/im2col_backward operator.
  645. */
  646. REG_OP(Col2im)
  647. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
  648. .INPUT(output_size, TensorType({DT_INT32, DT_INT32}))
  649. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
  650. .REQUIRED_ATTR(kernel_size, ListInt)
  651. .REQUIRED_ATTR(dilation, ListInt)
  652. .REQUIRED_ATTR(padding, ListInt)
  653. .REQUIRED_ATTR(stride, ListInt)
  654. .OP_END_FACTORY_REG(Col2im)
  655. /**
  656. * @brief Performs Im2col for each batch entry. \n
  657. * @par Inputs:
  658. * x: A 4D Tensor with shape [batch, in_rows, in_cols, depth], Must be one of the
  659. * following types:float32, int8, float16. The inputs must have data_format with
  660. * one of follows:NHWC, NCHW.
  661. * @par Attributes:
  662. * @li ksizes: A required list or tuple. The size of the sliding window for each
  663. * dimension of images.
  664. * @li strides: A optional list or tuple. How far the centers of two consecutive
  665. * patches are in the images. Defaults to "{1}".
  666. * @li dilations: A optional list or tuple. Defaults to "{1}".
  667. * This is the input stride, specifying how far two consecutive patch
  668. * samples are in the input. Equivalent to extracting patches
  669. * with patch_sizes_eff = patch_sizes + (patch_sizes - 1) *
  670. * (dilations - 1), followed by subsampling them spatially by a factor of dilations.
  671. * This is equivalent to rate in dilated (a.k.a. Atrous) convolutions.
  672. * @li padding_mode: A optional String. The type of padding algorithm to use,
  673. * support "SAME", "VALID", "CALCULATED". Among the three modes, only the "CALCULATED"
  674. * means to use the pads below. Defaults to "CALCULATED".
  675. * @li pads: A optional list or tuple. The pad distance. Defaults to "{0}". \n
  676. * @par Outputs:
  677. * y: A 4D Tensor with shape [batch, out_rows, out_cols, ksize_rows *
  678. * ksize_cols * depth] containing image patches with size ksize_rows x ksize_cols
  679. * x depth vectorized in the "depth" dimension. Note "out_rows" and "out_cols"
  680. * are the dimensions of the output patches . \n
  681. * @attention Constraints:
  682. * "ksizes", "strides", "dilations" and "pads" are lists of integers . \n
  683. * @par Third-party framework compatibility
  684. * Compatible with Pytorch Im2col operator.
  685. */
  686. REG_OP(Im2col)
  687. .INPUT(x, TensorType::RealNumberType())
  688. .OUTPUT(y, TensorType::RealNumberType())
  689. .REQUIRED_ATTR(ksizes, ListInt)
  690. .ATTR(strides, ListInt, {1})
  691. .ATTR(dilations, ListInt, {1})
  692. .ATTR(padding_mode, String, "CALCULATED")
  693. .ATTR(pads, ListInt, {0})
  694. .OP_END_FACTORY_REG(Im2col)
  695. /**
  696. *@brief Generates a 2D or 3D flow field (sampling grid), given a batch of affine
  697. matrices theta. \n
  698. *@par Inputs:
  699. *Input theta must be float16 or float, output_size must be int32 type.Inputs
  700. include:
  701. *@li theta: input batch of affine matrices with shape (N,2,3) for 2D or (N,3,4)
  702. for 3D
  703. *@li output_size: the target output image size. (N×C×H×W for 2D or N×C×D×H×W for
  704. 3D) Example: torch.Size((32, 3, 24, 24)) . \n
  705. *@par Attributes:
  706. *align_corners: if True, consider -1 and 1 to refer to the centers of the corner
  707. pixels rather than the image corners.Refer to grid_sample() for a more complete
  708. description. A grid generated by affine_grid() should be passed to grid_sample()
  709. with the same setting for this option. Default: False \n
  710. *@par Outputs:
  711. *@li y: A 2-D integer tensor of shape [M] representing the
  712. selected indices from the boxes tensor, where M <= max_output_size. \n
  713. *@attention Constraints:
  714. *Input theta must be float16 or float, output_size must be int32 type .
  715. The current implementation of AffineGrid operator AiCore adopts
  716. BatchMatMul's FP16 fusion operator scheme, and the accuracy will
  717. decrease when the theta range exceeds [-10,10].If the model requires
  718. high accuracy of AffineGrid, it is recommended to use AICPU. \n
  719. *@par Third-party framework compatibility
  720. *Compatible with Pytorch affine_grid operator.
  721. */
  722. REG_OP(AffineGrid)
  723. .INPUT(theta, TensorType({DT_FLOAT16, DT_FLOAT}))
  724. .INPUT(output_size, TensorType({DT_INT32}))
  725. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
  726. .ATTR(align_corners, Bool, false)
  727. .OP_END_FACTORY_REG(AffineGrid)
  728. /**
  729. *@brief Make memory of a view be contiguous. \n
  730. *@par Inputs:
  731. *Four inputs, including:
  732. *@li x: The input tensor.
  733. *@li size: The shape of output tensor.
  734. *@li stride: The stride of output tensor.
  735. *@li storage_offset: The offset in the underlying storage of the output tensor. \n
  736. *@par Outputs:
  737. *y: A Tensor. Has the same type as "x" . \n
  738. *@par Third-party framework compatibility
  739. *Compatible with the pytorch operator as_strided.
  740. */
  741. REG_OP(AsStrided)
  742. .INPUT(x, TensorType::BasicType())
  743. .INPUT(size, TensorType::IndexNumberType())
  744. .INPUT(stride, TensorType::IndexNumberType())
  745. .INPUT(storage_offset, TensorType::IndexNumberType())
  746. .OUTPUT(y, TensorType::BasicType())
  747. .OP_END_FACTORY_REG(AsStrided)
  748. /**
  749. *@brief This transform extracts n-grams from the input sequence and save them as a
  750. vector. \n
  751. *@par Inputs:
  752. *@li input: can be either a 1-D or 2-D tensor for n-gram extraction, It is ether string UTF-8 or int32/int64 . \n
  753. *@par Attributes:
  754. *@li max_gram_length : int (required)
  755. *Maximum n-gram length. If this value is 3, 3-grams will be used to generate the output .
  756. *@li max_skip_count : int (required)
  757. *Maximum number of items (integers/strings) to be skipped when constructing an n-gram from X.
  758. If max_skip_count=1, min_gram_length=2, max_gram_length=3, this operator may generate 2-grams
  759. with skip_count=0 and skip_count=1, and 3-grams with skip_count=0 and skip_count=1.
  760. *@li min_gram_length : int (required)
  761. *Minimum n-gram length. If this value is 2 and max_gram_length is 3, output may contain counts of
  762. 2-grams and 3-grams.
  763. *@li mode : string (required)
  764. *The weighting criteria. It can be one of "TF" (term frequency), "IDF" (inverse document frequency),
  765. and "TFIDF" (the combination of TF and IDF).
  766. *@li ngram_counts : list of ints (required)
  767. *The starting indexes of 1-grams, 2-grams, and so on in pool. It is useful when determining the boundary
  768. between two consecutive collections of n-grams. For example, if ngram_counts is [0, 17, 36],
  769. the first index (zero-based) of 1-gram/2-gram/3-gram in pool are 0/17/36. This format is essentially identical
  770. to CSR (or CSC) sparse matrix format, and we choose to use this due to its popularity.
  771. *@li ngram_indexes : list of ints (required)
  772. *list of int64s (type: AttributeProto::INTS). This list is parallel to the specified 'pool_*' attribute. The i-th element
  773. in ngram_indexes indicate the coordinate of the i-th n-gram in the output tensor.
  774. *@li pool_int64s : list of ints
  775. *List of int64 n-grams learned from the training set. Either this or pool_strings attributes must be present but not both.
  776. It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element
  777. in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector.
  778. *@li pool_strings : list of strings
  779. *List of strings n-grams learned from the training set. Either this or pool_int64s attributes must be present but not both.
  780. It's an 1-D tensor starting with the collections of all 1-grams and ending with the collections of n-grams. The i-th element
  781. in pool stores the n-gram that should be mapped to coordinate ngram_indexes[i] in the output vector.
  782. *@li weights : list of floats
  783. *list of floats. This attribute stores the weight of each n-gram in pool. The i-th element in weights is the weight of
  784. the i-th n-gram in pool. Its length equals to the size of ngram_indexes. By default, weights is an all-one tensor.This attribute
  785. is used when mode is "IDF" or "TFIDF" to scale the associated word counts. \n
  786. *@par Outputs:
  787. *@li output: tensor(float)
  788. *For 1-D input, output is the n-gram representation of that input. For 2-D input, the output is also a 2-D tensor
  789. whose i-th row is the n-gram representation of the i-th input row. More specifically, if input shape is [C], the corresponding
  790. output shape would be [max(ngram_indexes) + 1]. If input shape is [N, C], this operator produces a [N, max(ngram_indexes) + 1]-tensor. \n
  791. *@attention Constraints:
  792. *@li input can be either a 1-D or 2-D tensor, shape is [C] or [N, C].
  793. *@li max(ngram_indexes) + 1 == len(weights), len(y) == len(weights).
  794. *@li ngram_counts and pool(pool_int64s or pool_strings) must match.
  795. *@li either pool_strings or pool_int64s attributes must be present but not both.
  796. */
  797. REG_OP(TfIdfVectorizer)
  798. .INPUT(input, TensorType({DT_INT32, DT_INT64, DT_STRING}))
  799. .OUTPUT(output, TensorType({DT_FLOAT}))
  800. .REQUIRED_ATTR(max_gram_length, Int)
  801. .REQUIRED_ATTR(max_skip_count, Int)
  802. .REQUIRED_ATTR(min_gram_length, Int)
  803. .REQUIRED_ATTR(mode, String)
  804. .REQUIRED_ATTR(ngram_counts, ListInt)
  805. .REQUIRED_ATTR(ngram_indexes, ListInt)
  806. .ATTR(pool_int64s, ListInt, {})
  807. .ATTR(pool_strings, ListString, {})
  808. .ATTR(weights, ListFloat, {})
  809. .OP_END_FACTORY_REG(TfIdfVectorizer)
  810. } // namespace ge
  811. #endif // OPS_BUILT_IN_OP_PROTO_INC_TRANSFORMATION_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。