/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file array_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_
#include "graph/operator_reg.h"
#include "graph/operator.h"
namespace ge {
/**
*@brief Applies lower_bound(sorted_search_values, values) along each row. \n
*@par Inputs:
*Inputs "sorted_x" and "values" are 2-D tensors.
* @li sorted_x: A `Tensor`. 2-D Tensor where each row is ordered.
* @li values: A `Tensor`. Must have the same type as `sorted_x`. \n
*@par Attributes:
*out_type: An optional `DType` from: `int32, int64`.
Defaults to `int32`. \n
*@par Outputs:
*y: A `Tensor` of type `out_type`. \n
*@attention Constraints:
*LowerBound runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Quantization supported or not
*Not supported
*@par Quantized inference supported or not
*Supported
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator LowerBound.
*/
REG_OP(LowerBound)
    .INPUT(sorted_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(out_type, Type, DT_INT32)
    .OP_END_FACTORY_REG(LowerBound)
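/*
 * Editor's sketch (illustrative, not part of the original header): a worked
 * example of the lower_bound semantics this op implements. For each row, the
 * output holds the first index at which the value could be inserted into the
 * sorted row without violating the ordering:
 *
 *   sorted_x = [[1, 3, 5, 7]]
 *   values   = [[3, 4, 8]]
 *   y        = [[1, 2, 4]]   // first index i with sorted_x[0][i] >= value
 */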
/**
*@brief Reverses variable length slices. \n
*@par Inputs:
*Input "x" is a rank-k tensor. Input "seq_lengths" is a 1D tensor.
* @li x: A Tensor. The input to reverse.
* @li seq_lengths: A 1D Tensor of type int32 or int64. \n
*@par Attributes:
*@li seq_dim: A required int. The dimension along which
reversal is performed.
*@li batch_dim: An optional int. Defaults to "0". The dimension along which
the tensor is sliced into batches. \n
*@par Outputs:
*y: A rank-k tensor. Has the same shape as "x", with the leading
"seq_lengths" elements of each slice reversed. \n
*@attention Constraints:
*ReverseSequence runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ReverseSequence.
*/
REG_OP(ReverseSequence)
    .INPUT(x,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(seq_lengths, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .REQUIRED_ATTR(seq_dim, Int)
    .ATTR(batch_dim, Int, 0)
    .OP_END_FACTORY_REG(ReverseSequence)
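/*
 * Editor's sketch (illustrative, not part of the original header): with
 * batch_dim = 0 and seq_dim = 1, ReverseSequence reverses the first
 * seq_lengths[i] elements of row i and leaves the rest untouched:
 *
 *   x           = [[1, 2, 3, 4],
 *                  [5, 6, 7, 8]]
 *   seq_lengths = [2, 3]
 *   y           = [[2, 1, 3, 4],
 *                  [7, 6, 5, 8]]
 */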
/**
*@brief Copies a tensor, setting everything outside a central band in each innermost matrix to zero. \n
*@par Inputs:
*Input "x" is a k-dimensional tensor. Inputs "num_lower" and "num_upper"
are 0D scalars.
* @li x: A rank-k tensor.
* @li num_lower: A 0D tensor. Number of subdiagonals to keep. If negative,
keeps the entire lower triangle.
* @li num_upper: A 0D tensor. Number of superdiagonals to keep. If negative,
keeps the entire upper triangle. \n
*@par Outputs:
*y: A rank-k tensor. Has the same shape as the input. The extracted banded tensor. \n
*@attention Constraints:
*MatrixBandPart runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MatrixBandPart.
*/
REG_OP(MatrixBandPart)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, \
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64,
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL,
        DT_COMPLEX64, DT_COMPLEX128 }))
    .INPUT(num_lower, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(num_upper, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL,
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(MatrixBandPart)
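/*
 * Editor's sketch (illustrative, not part of the original header): an element
 * (m, n) of each innermost matrix is kept when
 *   (num_lower < 0 || m - n <= num_lower) && (num_upper < 0 || n - m <= num_upper),
 * and zeroed otherwise. For example, num_lower = 1, num_upper = 0 keeps the
 * main diagonal plus one subdiagonal:
 *
 *   x = [[1, 2, 3],        y = [[1, 0, 0],
 *        [4, 5, 6],             [4, 5, 0],
 *        [7, 8, 9]]             [0, 8, 9]]
 */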
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*x: A 1D tensor. \n
*@par Attributes:
*out_idx: A required DType from: "int32, int64". \n
*@par Outputs:
*@li y: A Tensor. Has the same type as "x".
*@li idx: A Tensor of type "out_idx".
*@li count: A Tensor of type "out_idx". \n
*@attention Constraints:
*UniqueWithCounts runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueWithCounts.
*/
REG_OP(UniqueWithCounts)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
    .OUTPUT(idx, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(count, TensorType({ DT_INT32, DT_INT64 }))
    .REQUIRED_ATTR(out_idx, Type)
    .OP_END_FACTORY_REG(UniqueWithCounts)
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*x: A 1D tensor. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64". Defaults to "int32". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index of each value of "x"
in the unique output "y". \n
*@attention Constraints:
*Unique runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Unique.
*/
REG_OP(Unique)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
    .ATTR(out_idx, Type, DT_INT32)
    .OP_END_FACTORY_REG(Unique)
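/*
 * Editor's sketch (illustrative, not part of the original header):
 * constructing the operator with the C++ wrapper that REG_OP conventionally
 * generates. The accessor names (set_input_x, set_attr_out_idx) are
 * assumptions based on that convention, not taken from this header.
 *
 *   ge::op::Unique unique_op("unique");
 *   unique_op.set_input_x(data_op);            // 1D input, e.g. [1, 1, 2, 4, 4, 4, 7, 8, 8]
 *   unique_op.set_attr_out_idx(ge::DT_INT64);  // index dtype; defaults to DT_INT32
 *   // y   = [1, 2, 4, 7, 8]
 *   // idx = [0, 0, 1, 2, 2, 2, 3, 4, 4]
 */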
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*Including:
* @li x: A 1D tensor.
* @li axis: A Tensor of type int32. Defaults to "None". \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64".
Defaults to "int32". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index of each value of "x"
in the unique output "y". \n
*@attention Constraints:
*UniqueExt2 runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueExt2.
*/
REG_OP(UniqueExt2)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(axis, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
    .ATTR(out_idx, Type, DT_INT32)
    .OP_END_FACTORY_REG(UniqueExt2)
/**
*@brief Computes the inverse permutation of a tensor. \n
*@par Inputs:
*x: A 1D tensor of type int32 or int64. A permutation. \n
*@par Outputs:
*y: A 1D tensor. The inverse permutation. \n
*@attention Constraints:
*InvertPermutation runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InvertPermutation.
*/
REG_OP(InvertPermutation)
    .INPUT(x, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(InvertPermutation)
/**
*@brief Checks a tensor for NaN and Inf values. \n
*@par Inputs:
*x: A k-dimensional tensor. \n
*@par Attributes:
*message: Prefix of the error message. \n
*@par Outputs:
*y: The output tensor. \n
*@attention Constraints:
*CheckNumerics runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator CheckNumerics.
*/
REG_OP(CheckNumerics)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(message, String)
    .OP_END_FACTORY_REG(CheckNumerics)
/**
*@brief Converts an array of flat indices into a tuple of coordinate arrays. \n
*@par Inputs:
*Input "indices" is a 0D or 1D tensor. Input "dims" is a 1D tensor.
* @li indices: A 0D or 1D int Tensor whose elements are indices into
the flattened version of an array of dimensions "dims".
* @li dims: A 1D int Tensor of the same type as "indices".
The shape of the array to use for unraveling indices. \n
*@par Outputs:
*y: A Tensor. Has the same type as "indices". \n
*@attention Constraints:
*UnravelIndex runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UnravelIndex.
*/
REG_OP(UnravelIndex)
    .INPUT(indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(dims, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(UnravelIndex)
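/*
 * Editor's sketch (illustrative, not part of the original header):
 * UnravelIndex converts flat, row-major indices into coordinates, matching
 * tf.unravel_index:
 *
 *   indices = [2, 5, 7]
 *   dims    = [3, 3]
 *   y       = [[0, 1, 2],    // row coordinate of each index
 *              [2, 2, 1]]    // column coordinate of each index
 */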
/**
*@brief Applies upper_bound(sorted_search_values, values) along each row. \n
*@par Inputs:
*Inputs "sorted_x" and "values" are 2D tensors.
* @li sorted_x: A 2D Tensor where each row is ordered.
* @li values: A 2D Tensor with the same number of rows as "sorted_x". \n
*@par Attributes:
*out_type: A required DType from: "int32, int64". \n
*@par Outputs:
*y: A Tensor with the same shape as "values". \n
*@attention Constraints:
*UpperBound runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UpperBound.
*/
REG_OP(UpperBound)
    .INPUT(sorted_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(out_type, Type)
    .OP_END_FACTORY_REG(UpperBound)
/**
*@brief Finds unique elements in a 1D tensor. \n
*@par Inputs:
*Inputs "x" and "axis" are 1D vectors.
* @li x: A 1D tensor.
* @li axis: A 1D tensor. \n
*@par Attributes:
*out_idx: A required DType from: "int32, int64". \n
*@par Outputs:
*@li y: A tensor containing the unique elements of "x".
*@li idx: A tensor the same size as "x". The index of each value of "x"
in the unique output "y".
*@li count: A tensor the same size as "y". The count of each unique value of "x". \n
*@attention Constraints:
*UniqueWithCountsExt2 runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator UniqueWithCountsExt2.
*/
REG_OP(UniqueWithCountsExt2)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
    .INPUT(axis, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING }))
    .OUTPUT(idx, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(count, TensorType({ DT_INT32, DT_INT64 }))
    .REQUIRED_ATTR(out_idx, Type)
    .OP_END_FACTORY_REG(UniqueWithCountsExt2)
/**
*@brief Fills the tensor with the mirror value. \n
*@par Inputs:
* @li x: The tensor to be padded.
* @li paddings: A two-column matrix specifying the padding sizes.
The number of rows must equal the rank of "x". \n
*@par Attributes:
*mode: Either "REFLECT" or "SYMMETRIC". In reflect mode the padded regions
do not include the borders, while in symmetric mode the padded regions
do include the borders. \n
*@par Outputs:
*y: The padded tensor. \n
*@attention Constraints:
*MirrorPad runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MirrorPad.
*/
REG_OP(MirrorPad)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, \
        DT_COMPLEX64, DT_COMPLEX128 }))
    .INPUT(paddings, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, \
        DT_COMPLEX64, DT_COMPLEX128 }))
    .REQUIRED_ATTR(mode, String)
    .OP_END_FACTORY_REG(MirrorPad)
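/*
 * Editor's sketch (illustrative, not part of the original header): the two
 * padding modes on a 1D input with paddings = [[2, 2]]:
 *
 *   x = [1, 2, 3]
 *   REFLECT   -> [3, 2, 1, 2, 3, 2, 1]   // border elements are not repeated
 *   SYMMETRIC -> [2, 1, 1, 2, 3, 3, 2]   // border elements are repeated
 */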
/**
*@brief Computes the difference between two lists of numbers or strings. \n
*@par Inputs:
*Inputs "x" and "y" are 1D vectors.
* @li x: A Tensor. 1D. Values to keep.
* @li y: A Tensor. Must have the same type as "x". 1D. Values to remove. \n
*@par Attributes:
*out_idx: An optional DType from: "int32, int64". Defaults to "int32". \n
*@par Outputs:
*@li out: A Tensor. Has the same type as "x".
*@li idx: A Tensor of type "out_idx". \n
*@attention Constraints:
*ListDiff runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ListDiff.
*/
REG_OP(ListDiff)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
    .INPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
    .OUTPUT(out, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8,
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64}))
    .OUTPUT(idx, TensorType({DT_INT32, DT_INT64}))
    .ATTR(out_idx, Type, DT_INT32)
    .OP_END_FACTORY_REG(ListDiff)
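/*
 * Editor's sketch (illustrative, not part of the original header): ListDiff
 * keeps the values of "x" that do not appear in "y", preserving order, and
 * reports where they occurred in "x":
 *
 *   x   = [1, 2, 3, 4, 5, 6]
 *   y   = [1, 3, 5]
 *   out = [2, 4, 6]
 *   idx = [1, 3, 5]
 */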
/**
*@brief Creates an empty tensor, using the shape and dtype specified in attributes. \n
*@par Attributes:
*@li dtype: Specifies the data type of the empty tensor.
*@li shape: Specifies the shape of the empty tensor. \n
*@par Outputs:
*y: The empty constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator _ParallelConcatStart.
*/
REG_OP(_ParallelConcatStart)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(dtype, Type, DT_INT32)
    .ATTR(shape, ListInt, {})
    .OP_END_FACTORY_REG(_ParallelConcatStart)
/**
*@brief Creates a constant tensor from a tensor-like object. This operator is used for inference.
Operator Const has the same definition as operator Constant. \n
*@par Attributes:
*value: Required. A tensor holding the value (and hence the type) of the resulting tensor.
There are no restrictions on the type. \n
*@par Outputs:
*y: A constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Const.
*/
REG_OP(Const)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(value, Tensor, Tensor())
    .OP_END_FACTORY_REG(Const)
/**
*@brief Creates a constant tensor for training. \n
*@par Attributes:
*value: Required. A tensor holding the value (and hence the type) of the resulting tensor.
There are no restrictions on the type. \n
*@par Outputs:
*y: The constant tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Const.
*/
REG_OP(Constant)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(value, Tensor, Tensor())
    .OP_END_FACTORY_REG(Constant)
/**
*@brief Creates a file constant tensor. This operator is used to process very large weights, which are stored in a file. \n
*@par Attributes:
*@li file_path: A string, used to record the file path.
*@li file_id: A string, used to record the file id.
*@li shape: The data shape.
*@li dtype: The data type. \n
*@par Outputs:
*y: The FileConstant tensor. \n
*/
REG_OP(FileConstant)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(file_path, String, "")
    .ATTR(file_id, String, "")
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(FileConstant)
/**
*@brief Returns a copy of the input tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A copy of the input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Snapshot.
*/
REG_OP(Snapshot)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(Snapshot)
/**
*@brief Gives a guarantee to the runtime that the input tensor is a constant. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator GuaranteeConst.
*/
REG_OP(GuaranteeConst)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(GuaranteeConst)
/**
*@brief Returns the target shape for broadcasting shapes "x1" and "x2". \n
*@par Inputs:
*@li x1: A tensor of type int32 or int64. A shape.
*@li x2: A tensor of the same type as "x1". The other shape. \n
*@par Outputs:
*y: A tensor. The broadcasted shape. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastArgs.
*/
REG_OP(BroadcastArgs)
    .INPUT(x1, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x2, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(BroadcastArgs)
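/*
 * Editor's sketch (illustrative, not part of the original header):
 * BroadcastArgs applies the usual NumPy/TensorFlow broadcasting rules to two
 * shape vectors:
 *
 *   x1 = [2, 1, 5]
 *   x2 = [7, 5]
 *   y  = [2, 7, 5]   // shapes are right-aligned; size-1 dims are broadcast
 */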
/**
*@brief Outputs its input tensor as is and triggers an error if a gradient is requested. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*message: Will be printed in the error at the attempt to request a gradient. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PreventGradient.
*/
REG_OP(PreventGradient)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(message, String, "")
    .OP_END_FACTORY_REG(PreventGradient)
/**
*@brief Returns the reduction indices for computing gradients of "x1" and "x2" with broadcast. \n
*@par Inputs:
*@li x1: A tensor of type int32 or int64.
*@li x2: A tensor of type int32 or int64.
"x2" has the same type as "x1". \n
*@par Outputs:
*@li y1: A tensor. Reduction indices of "x1".
*@li y2: A tensor. Reduction indices of "x2". \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastGradientArgs.
*/
REG_OP(BroadcastGradientArgs)
    .INPUT(x1, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x2, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y1, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y2, TensorType({DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(BroadcastGradientArgs)
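/*
 * Editor's sketch (illustrative, not part of the original header): the
 * gradient of a broadcast op must be summed over every dimension that was
 * broadcast, and BroadcastGradientArgs returns those axes. Continuing the
 * BroadcastArgs example above:
 *
 *   x1 = [2, 1, 5]
 *   x2 = [7, 5]      // right-aligned as [1, 7, 5]
 *   y1 = [1]         // x1 was broadcast along axis 1 of the [2, 7, 5] result
 *   y2 = [0]         // x2 was broadcast along axis 0 of the [2, 7, 5] result
 */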
/**
*@brief Stops gradient computation. None is returned for the node where the gradient computation is stopped. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: The input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator StopGradient.
*/
REG_OP(StopGradient)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(StopGradient)
/**
*@brief Returns a tensor with the same shape and contents as the input. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor with the same shape and contents as the input. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Identity.
*/
REG_OP(Identity)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(Identity)
/**
*@brief Returns a list of tensors with the same shapes and contents as the input tensors. \n
*@par Inputs:
*x: A list of input tensors. It's a dynamic input. \n
*@par Outputs:
*y: A list of Tensor objects, with the same length as the input tensor list.
It's a dynamic output. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator IdentityN.
*/
REG_OP(IdentityN)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(IdentityN)
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*@li x: A tensor.
*@li axis: The dimension index at which to expand. \n
*@par Outputs:
*y: A tensor with the same data as the input, with an additional dimension inserted at the index specified by axis. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ExpandDims.
*/
REG_OP(ExpandDims)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
        DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .INPUT(axis, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
        DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OP_END_FACTORY_REG(ExpandDims)
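/*
 * Editor's sketch (illustrative, not part of the original header): only the
 * shape changes; a negative axis counts from the end, as in TensorFlow:
 *
 *   x shape = [2, 3]
 *   axis 0  -> y shape [1, 2, 3]
 *   axis 1  -> y shape [2, 1, 3]
 *   axis -1 -> y shape [2, 3, 1]
 */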
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*x: The original tensor. \n
*@par Attributes:
*axes: A list of ints indicating the dimensions to be inserted. \n
*@par Outputs:
*y: The reshaped tensor, with the same data as the input. \n
*@par Third-party framework compatibility
*Compatible with the Onnx operator Unsqueeze.
*/
REG_OP(Unsqueeze)
    .INPUT(x, TensorType({DT_FLOAT32, DT_INT32, DT_UINT8, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT32, DT_INT32, DT_UINT8, DT_BOOL}))
    .ATTR(axes, ListInt, {})
    .OP_END_FACTORY_REG(Unsqueeze)
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*x: The original tensor. \n
*@par Attributes:
*axis: A list of ints indicating the dimensions to be inserted. \n
*@par Outputs:
*y: The reshaped tensor, with the same data as the input. \n
*@par Third-party framework compatibility
*Compatible with the Onnx operator Unsqueeze.
*@par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Unsqueeze instead.
*/
REG_OP(UnsqueezeV2)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(axis, ListInt, {})
    .OP_END_FACTORY_REG(UnsqueezeV2)
/**
*@brief Inserts a dimension of 1 into a tensor's shape. Only the tensor shape
is changed, but the data is not changed. \n
*@par Inputs:
*@li x: A tensor.
*@li axes: A list of int64, which indicates the dimensions to be inserted. \n
*@par Outputs:
*y: The reshaped tensor, with the same data as the input. \n
*@par Third-party framework compatibility
*Compatible with the Onnx operator Unsqueeze in V13. \n
*/
REG_OP(UnsqueezeV3)
    .INPUT(x, TensorType::ALL())
    .INPUT(axes, ListInt)
    .OUTPUT(y, TensorType::ALL())
    .OP_END_FACTORY_REG(UnsqueezeV3)
/**
*@brief Reshapes a tensor. Only the tensor shape is changed, without changing the data. \n
*@par Inputs:
*@li x: A tensor.
*@li shape: A tensor. Defines the shape of the output tensor. \n
*@par Attributes:
*@li axis: An optional int32 or int64. The first dimension to reshape. Defaults to "0".
*@li num_axes: An optional int32 or int64. The extent of the reshape. Defaults to "-1". \n
*@par Outputs:
*y: A tensor. \n
*@attention Constraints:
*This operator cannot be directly called by the aclopExecute API. \n
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator Reshape.
*@li Compatible with the Caffe operator Reshape.
*/
REG_OP(Reshape)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
        DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
        DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(axis, Int, 0)
    .ATTR(num_axes, Int, -1)
    .OP_END_FACTORY_REG(Reshape)
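/*
 * Editor's sketch (illustrative, not part of the original header): building a
 * Reshape node with the C++ wrapper that REG_OP conventionally generates. The
 * set_input_ accessor names are assumptions based on that convention. Under
 * the TensorFlow semantics, one dimension of "shape" may be -1 and is
 * inferred from the remaining elements:
 *
 *   ge::op::Reshape reshape_op("reshape");
 *   reshape_op.set_input_x(data_op);        // e.g. input shape [2, 3, 4]
 *   reshape_op.set_input_shape(shape_op);   // constant [6, -1] -> output shape [6, 4]
 */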
/**
*@brief Removes dimensions of size 1 from the shape of a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*axis: An optional list of int32 or int64. If not specified, squeezes all dimensions of size 1.
If specified, only squeezes the dimensions listed. It is an error to squeeze a dimension that is not 1. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Squeeze.
*/
REG_OP(Squeeze)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(axis, ListInt, {})
    .OP_END_FACTORY_REG(Squeeze)
/**
*@brief Removes dimensions of size 1 from the shape of a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*axis: An optional list of int32 or int64. If not specified, squeezes all dimensions of size 1.
If specified, only squeezes the dimensions listed. It is an error to squeeze a dimension that is not 1. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Squeeze.
*@par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Squeeze instead.
*/
REG_OP(SqueezeV2)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(axis, ListInt, {})
    .OP_END_FACTORY_REG(SqueezeV2)
/**
*@brief Removes dimensions of size 1 from the shape of a tensor according to axes. \n
*@par Inputs:
*@li x: A tensor.
*@li axes: An optional list of int64. If not specified, squeezes all dimensions of
size 1. If specified, only squeezes the dimensions listed. It is an error to
squeeze a dimension that is not 1. \n
*@par Outputs:
*y: The reshaped tensor, with the same data as the input. \n
*@par Third-party framework compatibility
*Compatible with the onnx operator Squeeze in V13. \n
*/
REG_OP(SqueezeV3)
    .INPUT(x, TensorType::ALL())
    .OPTIONAL_INPUT(axes, ListInt)
    .OUTPUT(y, TensorType::ALL())
    .OP_END_FACTORY_REG(SqueezeV3)
/**
*@brief Returns an integer representing the rank of the input tensor. The rank of a tensor
is the number of indices required to uniquely select each element of the tensor,
that is, the dimension size of the tensor. \n
*@par Inputs:
*x: A Tensor of type float32, float16, int8, int16, uint16, uint8, int32, int64, uint32, uint64, bool, double. \n
*@par Outputs:
*y: A tensor. The rank of the input tensor. Type is int32. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Rank.
*/
REG_OP(Rank)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(Rank)
/**
*@brief Returns the size of a tensor, that is, an integer of the number of elements of the tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to "int32". \n
*@par Outputs:
*y: A tensor. The size of the input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Size.
*/
REG_OP(Size)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Int, DT_INT32)
    .OP_END_FACTORY_REG(Size)
/**
*@brief Input data for other operators. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*index: Index of the input tensor. The data type must be int32 or int64.
Assume that a net has three data nodes: one should be set to 0, another to 1,
and the last to 2. \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the Caffe operator Data.
*/
REG_OP(Data)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(index, Int, 0)
    .OP_END_FACTORY_REG(Data)
/**
*@brief Inserts a placeholder for a tensor that will always be fed. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*@li peerIndex: An integer type. The index of the corresponding "end" node connected to.
*@li parentId: A string, used to check if the nodes are from the saved parent node.
*@li parentOpType: A string. Op type of the original node.
*@li anchorIndex: An integer, used to check if the node is from the saved anchor. \n
*@par Outputs:
*y: The created placeholder tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PlaceHolder.
*/
REG_OP(PlaceHolder)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(peerIndex, Int, 0)        // the index of the corresponding 'end' node it's connected to
    .ATTR(parentId, String, "")     // checks if these nodes are from the saved parent node
    .ATTR(parentOpType, String, "") // op type of the original node
    .ATTR(anchorIndex, Int, 0)      // checks if these nodes are from the saved anchor
    .OP_END_FACTORY_REG(PlaceHolder)
/**
*@brief Inserts a placeholder with a default value for a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*shape: The tensor shape. \n
*@par Outputs:
*y: The created placeholder tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PlaceholderWithDefault.
*/
REG_OP(PlaceholderWithDefault)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(PlaceholderWithDefault)
/**
*@brief Reads and returns the value of the input variable tensor. \n
*@par Inputs:
*x: A tensor of numeric type. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to int32. \n
*@par Outputs:
*y: A tensor of numeric type. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ReadVariableOp.
*/
REG_OP(ReadVariableOp)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(dtype, Int, DT_INT32)
    .OP_END_FACTORY_REG(ReadVariableOp)
/**
*@brief Marks the outputs of a subgraph that was partitioned by engine type. \n
*@par Inputs:
*x: A tensor. \n
*@par Outputs:
*y: A tensor. \n
*@par Attributes:
*@li peerIndex: The index of the corresponding 'placeholder' node it's connected to.
*@li parentOpType: Op type of the original node. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(End)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(peerIndex, Int, 0)
    .ATTR(parentOpType, String, "")
    .OP_END_FACTORY_REG(End)
/**
*@brief Operations for writing summary data, for use in analysis and visualization. \n
*@par Inputs:
* One input:
*x: Collections of summary data. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(Summary)
    .INPUT(x, TensorType::ALL())
    .OP_END_FACTORY_REG(Summary)
/**
*@brief Returns the shape of a tensor. \n
*@par Inputs:
*x: A tensor. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to int32. \n
*@par Outputs:
*y: A tensor. The shape of the input tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Shape.
*/
REG_OP(Shape)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Int, DT_INT32)
    .OP_END_FACTORY_REG(Shape)
/**
*@brief Gathers selected dimensions from the shapes of the input tensors and returns
them as a shape vector. \n
*@par Inputs:
*x: A list of input tensors. It's a dynamic input. \n
*@par Attributes:
*axes: Selects some dims of the inputs. \n
*@par Outputs:
*shape: The shape vector gathered from the input shapes. \n
*/
REG_OP(GatherShapes)
    .DYNAMIC_INPUT(x, TensorType::ALL())
    .OUTPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(axes, ListListInt)
    .ATTR(dtype, Int, DT_INT32)
    .OP_END_FACTORY_REG(GatherShapes)
/**
*@brief Returns the shapes of tensors. \n
*@par Inputs:
*x: A list of input tensors. It's a dynamic input. \n
*@par Attributes:
*dtype: An optional int32 or int64. The output data type. Defaults to "int32". \n
*@par Outputs:
*y: A list of tensors with the same length as the input list of tensors.
It's a dynamic output. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ShapeN.
*/
REG_OP(ShapeN)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Int, DT_INT32)
    .OP_END_FACTORY_REG(ShapeN)
/**
*@brief Creates a tensor with the given "shape" and "dtype". \n
*@par Inputs:
*shape: The shape of the output tensor. \n
*@par Attributes:
*@li dtype: Optional. The data type of the output tensor. Defaults to "int32".
*@li init: An optional bool. If true, initializes the returned tensor with the default value of "dtype". Defaults to "false". \n
*@par Outputs:
*y: A tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Empty.
*/
REG_OP(Empty)
    .INPUT(shape, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
        DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .ATTR(dtype, Int, DT_INT32)
    .ATTR(init, Bool, false)
    .OP_END_FACTORY_REG(Empty)
/**
*@brief Gradient op for MirrorPad op. Folds a mirror-padded tensor. \n
*@par Inputs:
* @li x: A Tensor. The input tensor to be folded.
* @li paddings: A Tensor of type int32 or int64. A two-column matrix
specifying the padding sizes. \n
*@par Attributes:
*mode: A string from: "REFLECT", "SYMMETRIC". The mode used in the MirrorPad op. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". \n
*@attention Constraints:
*MirrorPadGrad runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MirrorPadGrad.
*/
REG_OP(MirrorPadGrad)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128 }))
    .INPUT(paddings, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128 }))
    .REQUIRED_ATTR(mode, String)
    .OP_END_FACTORY_REG(MirrorPadGrad)
/**
* @brief Returns locations of nonzero / true values in a tensor. \n
* @par Inputs:
* Including:
* @li x: A Tensor. Must be one of the following types:
DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_QINT8,
DT_QUINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32, DT_QINT32,
DT_INT64, DT_UINT64, DT_BOOL, DT_COMPLEX64, DT_COMPLEX128 \n
* @par Outputs:
* @li y: A Tensor of type DT_INT64. \n
* @attention Constraints:
* Where runs on the Ascend AI CPU, which delivers poor performance. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Where.
*/
REG_OP(Where)
    .INPUT(x, TensorType({BasicType(), DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(Where)
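/*
 * Editor's sketch (illustrative, not part of the original header): Where
 * returns the coordinates of every nonzero / true element, one row per
 * element:
 *
 *   x = [[true, false],
 *        [false, true]]
 *   y = [[0, 0],
 *        [1, 1]]
 */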
/**
*@brief Derived from the Caffe operator Split, which splits an input blob into
* multiple output blobs for feeding a blob into multiple output layers.
*The Split node is removed from the graph after the split operation is completed. \n
*@par Inputs:
*x: A Tensor. Must be one of the following types:
fp16, fp32, int8, uint8, int16, uint16, int32, uint32, int64, uint64. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". It's a dynamic output whose size equals "N". \n
*@par Attributes:
*N: A required int. The number of dynamic outputs.
*/
REG_OP(Copy)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .REQUIRED_ATTR(N, Int)
    .OP_END_FACTORY_REG(Copy)
/**
*@brief Copies the src tensor to the dst tensor according to the view parameters. \n
*@par Inputs:
*Eight inputs, including:
*@li dst: A tensor. Must be one of the following types:
double, float32, float16, int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool.
*@li dst_size: A tensor with type int32.
*@li dst_stride: A tensor with type int32.
*@li dst_storage_offset: A tensor with type int32.
*@li src: A tensor. Must be one of the following types:
double, float32, float16, int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool.
*@li src_size: A tensor with type int32.
*@li src_stride: A tensor with type int32.
*@li src_storage_offset: The storage_offset of the src tensor. \n
*@par Outputs:
*dst: A ref tensor. Must be one of the following types:
double, float32, float16, int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool. \n
*/
REG_OP(ViewCopy)
    .INPUT(dst, TensorType::BasicType())
    .INPUT(dst_size, TensorType::IndexNumberType())
    .INPUT(dst_stride, TensorType::IndexNumberType())
    .INPUT(dst_storage_offset, TensorType::IndexNumberType())
    .INPUT(src, TensorType::BasicType())
    .INPUT(src_size, TensorType::IndexNumberType())
    .INPUT(src_stride, TensorType::IndexNumberType())
    .INPUT(src_storage_offset, TensorType::IndexNumberType())
    .OUTPUT(dst, TensorType::BasicType())
    .OP_END_FACTORY_REG(ViewCopy)
/**
*@brief Generates fingerprint values. \n
*@par Inputs:
*@li data: Must have rank 1 or higher.
*@li method: Fingerprint method used by this op. The currently available method is
`farmhash::fingerprint64`. \n
*@par Outputs:
*y: A two-dimensional `Tensor` of type `tf.uint8`. The first dimension equals
`data`'s first dimension, and the second dimension size depends on the
fingerprint algorithm. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow Fingerprint operator.
*/
REG_OP(Fingerprint)
    .INPUT(data, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .INPUT(method, TensorType({DT_STRING}))
    .OUTPUT(y, TensorType({DT_UINT8}))
    .OP_END_FACTORY_REG(Fingerprint)
/**
*@brief Changes the shape of the output according to the attribute outShape. \n
*@par Inputs:
*x: A Tensor. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". \n
*@par Attributes:
*outShape: The shape of the output is inferred according to this attribute.
*/
REG_OP(TransShape)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(outShape, ListInt, {})
    .OP_END_FACTORY_REG(TransShape)
/**
*@brief Computes the (possibly normalized) Levenshtein Edit Distance. \n
*@par Inputs:
*@li hypothesis_indices: The indices of the hypothesis list SparseTensor.
This is an N x R int64 matrix.
*@li hypothesis_values: The values of the hypothesis list SparseTensor.
This is an N-length vector.
*@li hypothesis_shape: The shape of the hypothesis list SparseTensor.
This is an R-length vector.
*@li truth_indices: The indices of the truth list SparseTensor.
This is an M x R int64 matrix.
*@li truth_values: The values of the truth list SparseTensor.
This is an M-length vector.
*@li truth_shape: The shape of the truth list SparseTensor.
This is an R-length vector. \n
*@par Attributes:
*normalize: A boolean. If true, edit distances are normalized by the length of the truth. \n
*@par Outputs:
*output: A dense float tensor with rank R - 1. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow EditDistance operator.
*/
REG_OP(EditDistance)
    .INPUT(hypothesis_indices, TensorType({DT_INT64}))
    .INPUT(hypothesis_values, TensorType::BasicType())
    .INPUT(hypothesis_shape, TensorType({DT_INT64}))
    .INPUT(truth_indices, TensorType({DT_INT64}))
    .INPUT(truth_values, TensorType::BasicType())
    .INPUT(truth_shape, TensorType({DT_INT64}))
    .ATTR(normalize, Bool, true)
    .OUTPUT(output, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(EditDistance)
/**
* @brief Sorts the input tensor without returning the indices. \n

* @par Inputs:
* x: An ND tensor of type float16, float32, or double. \n

* @par Attributes:
* @li axis: An optional int. The dimension to sort along. Defaults to -1.
* @li descending: An optional bool. Controls the sorting order (ascending or descending). Defaults to False. \n

* @par Outputs:
* y: An ND tensor. Has the same type as "x". \n

* @attention Constraints:
* @li "axis" must select the last dimension.
* @li When the data to sort contains fewer than 150K elements, this TBE operator is recommended,
and descending order performs better than ascending order.
* @li On Ascend 910, the data volume is limited to 2000K elements.
*/
REG_OP(SortV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(axis, Int, -1)
    .ATTR(descending, Bool, false)
    .OP_END_FACTORY_REG(SortV2)
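/*
 * Usage sketch (editor's addition): a SortV2 node sorting the last dimension
 * in descending order. Same assumed setup as the sketches above.
 *
 *   ge::op::Data scores("scores");               // e.g. float16, shape [batch, n]
 *   auto sorted = ge::op::SortV2("sort_v2")
 *                     .set_input_x(scores)
 *                     .set_attr_axis(-1)          // only the last dim is supported
 *                     .set_attr_descending(true); // descending is the faster order
 */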
/**
* @brief Expands the input tensor to a compatible shape. \n

* @par Inputs:
* Two inputs, including:
* @li x: A Tensor. Must be one of the following types:
* float16, float32, int32, int64, int8, uint8, bool.
* @li shape: A Tensor that specifies the shape the input tensor is expanded to. \n

* @par Outputs:
* y: A Tensor. Has the same type as "x", and the shape specified by the "shape" input. \n

* @par Third-party framework compatibility
* Compatible with the ONNX operator Expand.
*/
REG_OP(Expand)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64, DT_INT8, DT_UINT8, DT_BOOL}))
    .INPUT(shape, TensorType({DT_INT16, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64, DT_INT8, DT_UINT8, DT_BOOL}))
    .OP_END_FACTORY_REG(Expand)
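/*
 * Usage sketch (editor's addition): Expand with the target shape supplied as a
 * constant tensor, mirroring ONNX Expand broadcasting. The Const setup follows
 * the usual GE pattern (set_attr_value on a ge::Tensor) and is illustrative.
 *
 *   int64_t target[] = {2, 3, 4};
 *   ge::TensorDesc desc(ge::Shape({3}), ge::FORMAT_ND, ge::DT_INT64);
 *   ge::Tensor shape_tensor(desc, reinterpret_cast<uint8_t *>(target), sizeof(target));
 *   auto shape = ge::op::Const("shape").set_attr_value(shape_tensor);
 *
 *   ge::op::Data x("x");                   // e.g. float32, shape [1, 3, 1]
 *   auto y = ge::op::Expand("expand")
 *                .set_input_x(x)
 *                .set_input_shape(shape);  // y: float32, shape [2, 3, 4]
 */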
/**
*@brief Returns a tensor containing the indices of all non-zero elements of the input. \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: double, float32, float16,
int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool. \n

*@par Attributes:
*@li transpose: The output tensor will be transposed if true.
*@li dtype: The data type of the output "y", either int64 (the default) or int32. \n

*@par Outputs:
*y: A Tensor of type int32 or int64 (see "dtype"), containing the indices of the non-zero elements. \n

*@par Third-party framework compatibility
*Compatible with the PyTorch operator NonZero.
*/
REG_OP(NonZero)
    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
                          DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32}))
    .ATTR(transpose, Bool, false)
    .ATTR(dtype, Type, DT_INT64)
    .OP_END_FACTORY_REG(NonZero)
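/*
 * Usage sketch (editor's addition): NonZero emitting int32 indices. The
 * "dtype" attribute takes a ge::DataType value; accessor names are the
 * generated ones and are illustrative.
 *
 *   ge::op::Data mask("mask");                  // e.g. bool, shape [H, W]
 *   auto nz = ge::op::NonZero("non_zero")
 *                 .set_input_x(mask)
 *                 .set_attr_transpose(false)    // indices laid out per element
 *                 .set_attr_dtype(ge::DT_INT32);
 */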
/**
*@brief Returns the non-zero elements of the input, together with their indices and count. \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: double, float32, float16,
int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool. \n

*@par Attributes:
* transpose: The output tensor will be transposed if true. \n

*@par Outputs:
*@li value: A Tensor. Has the same type as "x", containing the non-zero elements.
*@li index: A Tensor of type int32, the indices of the non-zero elements in the input.
*@li count: A scalar of type int32, the number of non-zero elements in the input. \n

*@par Third-party framework compatibility
*Compatible with the PyTorch operator NonZeroWithValue.
*/
REG_OP(NonZeroWithValue)
    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
                          DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .OUTPUT(value, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
                               DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .OUTPUT(index, TensorType({DT_INT32}))
    .OUTPUT(count, TensorType({DT_INT32}))
    .ATTR(transpose, Bool, false)
    .ATTR(dtype, Type, DT_INT32)
    .OP_END_FACTORY_REG(NonZeroWithValue)
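/*
 * Usage sketch (editor's addition): NonZeroWithValue has three outputs, which
 * downstream nodes reference by output name. The two-argument set_input form
 * shown in the trailing comment follows the usual GE by-name wiring pattern
 * and is illustrative.
 *
 *   ge::op::Data x("x");
 *   auto nzv = ge::op::NonZeroWithValue("non_zero_with_value")
 *                  .set_input_x(x);
 *   // Consume a single output, e.g. feed "count" into another node:
 *   //   consumer.set_input_x(nzv, "count");
 */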
/**
*@brief Updates the shapes of the tensors produced by NonZeroWithValue. \n

*@par Inputs:
*@li value: A Tensor. The "value" output of NonZeroWithValue.
*@li index: A Tensor. The "index" output of NonZeroWithValue.
*@li count: A Tensor of type int32, the number of non-zero elements in the input. \n

*@par Outputs:
*@li out_value: A Tensor. Has the same type as "value".
*@li out_index: A Tensor. Has the same type as "index". \n
*/
REG_OP(NonZeroWithValueShape)
    .INPUT(value, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16,
                              DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .INPUT(index, TensorType({DT_INT32}))
    .INPUT(count, TensorType({DT_INT32}))
    .OUTPUT(out_value, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16,
                                   DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .OUTPUT(out_index, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(NonZeroWithValueShape)
/**
* @brief Expands the input tensor to a compatible shape. \n

* @par Inputs:
* One input, including:
* x: A Tensor. Must be one of the following types:
* float16, float32, int32, int8, uint8, bool. \n

* @par Attributes:
* shape: A required ListInt that specifies the shape the input tensor is expanded to. \n

* @par Outputs:
* y: A Tensor. Has the same type as "x", and the shape specified by the attribute "shape". \n

* @par Third-party framework compatibility
* Compatible with the ONNX operator Expand.
*/
REG_OP(ExpandD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8, DT_BOOL}))
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(ExpandD)
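/*
 * Usage sketch (editor's addition): ExpandD is the compile-time variant of
 * Expand, so the target shape is an attribute rather than a tensor input.
 *
 *   ge::op::Data x("x");                      // e.g. float16, shape [1, 3, 1]
 *   auto y = ge::op::ExpandD("expand_d")
 *                .set_input_x(x)
 *                .set_attr_shape({2, 3, 4});  // ListInt attr, fixed at build time
 */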
/**
*@brief Gets the dimension values recorded in the tensor descriptor. \n

*@par Inputs:
*x: A dynamic number of input tensors. \n

*@par Outputs:
*y: A 1D tensor. The data type must be int32. \n

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(GetShape)
    .DYNAMIC_INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_INT8, DT_UINT8, DT_INT16, \
                                  DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(GetShape)
/**
*@brief Updates the tensor_desc of the output. \n

*@par Inputs:
*x: A Tensor. \n

*@par Attributes:
*shape: A ListInt containing the shape data to update. \n

*@par Outputs:
*y: A Tensor whose tensor_desc carries the updated shape; the data type is unchanged. \n

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(UpdateTensorDesc)
    .INPUT(x, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                          DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32, DT_UINT32, DT_UINT8,
                           DT_INT64, DT_UINT64, DT_INT16, DT_UINT16, DT_DOUBLE}))
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(UpdateTensorDesc)
/**
*@brief Queues data for other operators. \n

*@par Attributes:
*@li index: Index of the input tensor. The data type must be int32 or int64.
Assume that a net has three data nodes: one should be set to 0, another to 1,
and the last to 2.
*@li queue_name: The queue name.
*@li output_types: The data types of the output data.
*@li output_shapes: The shapes of the output data. \n

*@par Outputs:
*y: A DT_UINT8 tensor. \n
*/
REG_OP(QueueData)
    .OUTPUT(y, TensorType({DT_UINT8}))
    .ATTR(index, Int, 0)
    .ATTR(queue_name, String, "")
    .ATTR(output_types, ListType, {})
    .ATTR(output_shapes, ListListInt, {{}, {}})
    .OP_END_FACTORY_REG(QueueData)
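/*
 * Usage sketch (editor's addition): a QueueData node declaring one float32
 * NCHW batch; the attribute values are illustrative.
 *
 *   auto queue = ge::op::QueueData("queue_data")
 *                    .set_attr_index(0)                       // first data node
 *                    .set_attr_queue_name("train_input_queue")
 *                    .set_attr_output_types({ge::DT_FLOAT})
 *                    .set_attr_output_shapes({{32, 3, 224, 224}});
 */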
/**
* @brief Ensures that the tensor's shape matches the expected shape. \n

* @par Inputs:
* input: A Tensor to be checked against the desired shape.
* Must be one of the following types:
* int8, uint8, int16, uint16, int32, int64, float16, float32,
* double, complex64, complex128. \n

* @par Attributes:
* shape: Required. The desired tensor shape. Type: ListInt. \n

* @par Outputs:
* output: A Tensor. Has the same type and contents as "input".
* Must be one of the following types:
* int8, uint8, int16, uint16, int32, int64, float16, float32,
* double, complex64, complex128. \n
*/
REG_OP(EnsureShape)
    .INPUT(input, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, \
                              DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(output, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, \
                                DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(EnsureShape)
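/*
 * Usage sketch (editor's addition): EnsureShape passes "input" through
 * unchanged but fails if its shape does not match the attribute, analogous to
 * TensorFlow's ensure_shape.
 *
 *   ge::op::Data logits("logits");
 *   auto checked = ge::op::EnsureShape("ensure_shape")
 *                      .set_input_input(logits)
 *                      .set_attr_shape({32, 1000});  // expected [batch, classes]
 */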
/**
* @brief Finds the first unique element from every consecutive group of equivalent elements. \n

* @par Inputs:
* x: An ND tensor. \n

* @par Attributes:
* @li return_idx: An optional bool. Whether to also return the indices. Defaults to False.
* @li return_counts: An optional bool. Whether to also return the count of each element. Defaults to False.
* @li axis: An optional int. The axis to apply unique along. Defaults to 1000, which means None
(the input is flattened). \n

* @par Outputs:
* @li y: The unique elements of "x".
* @li idx: For each element of "x", the index of its value in "y".
* @li count: The count of each unique value in "y". \n

* @attention Constraints:
* UniqueConsecutive runs on the Ascend AI CPU, which delivers poor performance. \n

* @par Third-party framework compatibility
* Compatible with the PyTorch operator UniqueConsecutive.
*/
REG_OP(UniqueConsecutive)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OUTPUT(idx, TensorType::IndexNumberType())
    .OUTPUT(count, TensorType::IndexNumberType())
    .ATTR(return_idx, Bool, false)
    .ATTR(return_counts, Bool, false)
    .ATTR(axis, Int, 1000)
    .OP_END_FACTORY_REG(UniqueConsecutive)
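/*
 * Usage sketch (editor's addition): run-length style deduplication, e.g.
 * x = [1, 1, 2, 2, 3, 1] -> y = [1, 2, 3, 1], idx = [0, 0, 1, 1, 2, 3],
 * count = [2, 2, 1, 1] (PyTorch unique_consecutive semantics).
 *
 *   ge::op::Data x("x");
 *   auto uc = ge::op::UniqueConsecutive("unique_consecutive")
 *                 .set_input_x(x)
 *                 .set_attr_return_idx(true)
 *                 .set_attr_return_counts(true);  // axis left at 1000 = flatten
 */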
/**
* @brief Decodes a variant Tensor into a RaggedTensor. \n
*
* @par Inputs:
* encoded_ragged: A Tensor of type variant. A variant Tensor containing encoded RaggedTensors. \n
*
* @par Outputs:
* @li output_nested_splits: A list of output_ragged_rank Tensor objects with type int32 or int64.
* @li output_dense_values: A Tensor, which must be one of the following types:
* double, float32, float16, int8, uint8, int16, uint16, int32, uint32, int64, uint64, bool. \n
*
* @par Attributes:
* @li input_ragged_rank: An int that is >= -1. The ragged rank of each encoded RaggedTensor component in the input.
* If set to -1, it is inferred as output_ragged_rank - rank(encoded_ragged).
* @li output_ragged_rank: An int that is >= 0. The expected ragged rank of the output RaggedTensor.
* The following must hold: output_ragged_rank = rank(encoded_ragged) + input_ragged_rank.
* @li Tvalues: The data type of output_dense_values.
* @li Tsplits: The data type of output_nested_splits. An optional DType of "int32, int64". Defaults to `int64`. \n
*
* @par Third-party framework compatibility
* Compatible with the TensorFlow RaggedTensorFromVariant operator.
*/
REG_OP(RaggedTensorFromVariant)
    .INPUT(encoded_ragged, TensorType({DT_VARIANT}))
    .DYNAMIC_OUTPUT(output_nested_splits, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(output_dense_values, TensorType::BasicType())
    .REQUIRED_ATTR(input_ragged_rank, Int)
    .REQUIRED_ATTR(output_ragged_rank, Int)
    .REQUIRED_ATTR(Tvalues, Type)
    .ATTR(Tsplits, Type, DT_INT64)
    .OP_END_FACTORY_REG(RaggedTensorFromVariant)
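/*
 * Usage sketch (editor's addition): decoding a variant scalar into a ragged
 * rank-1 tensor. The create_dynamic_output_* call reserves slots for the
 * nested split tensors; the accessor names follow the usual REG_OP pattern
 * and are illustrative.
 *
 *   ge::op::Data encoded("encoded");  // DT_VARIANT scalar
 *   ge::op::RaggedTensorFromVariant ragged("ragged_from_variant");
 *   ragged.create_dynamic_output_output_nested_splits(1);  // = output_ragged_rank
 *   ragged.set_input_encoded_ragged(encoded)
 *         .set_attr_input_ragged_rank(1)
 *         .set_attr_output_ragged_rank(1)
 *         .set_attr_Tvalues(ge::DT_FLOAT)
 *         .set_attr_Tsplits(ge::DT_INT64);
 */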
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_ARRAY_OPS_H_

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and finally outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts: GE API and GE Core.