You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

data_flow_ops.h 98 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file data_flow_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_
  22. #include <algorithm>
  23. #include "graph/operator_reg.h"
  24. #include "graph/operator.h"
  25. namespace ge {
/**
*@brief Returns true if the queue is closed and false if the queue
is open. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*handle: A Tensor of type resource. The handle to a queue. \n

*@par Outputs:
*is_closed: A Tensor of type bool. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueIsClosed operator.
*/
REG_OP(QueueIsClosed)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(is_closed, TensorType({DT_BOOL}))
    .OP_END_FACTORY_REG(QueueIsClosed)
/**
*@brief Computes the number of elements in the given queue. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*handle: A Tensor of type mutable resource. The handle to a queue. \n

*@par Outputs:
*size: A Tensor of type int32. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueSize operator.
*/
REG_OP(QueueSize)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(size, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(QueueSize)
/**
*@brief A queue that produces elements in first-in first-out order. \n

*@par Attributes:
*@li component_types: Required. A list of DType objects. The length of
component_types must equal the number of tensors in each queue element.
*@li shapes: Optional. A list of fully-defined TensorShape objects with the
same length as component_types, or an empty list. Defaults to {}.
*@li capacity: Optional. An integer. The upper bound on the number of
elements that may be stored in this queue. Defaults to -1.
*@li container: Optional. A string. Defaults to "". If non-empty, this queue
is placed in the given container. Otherwise, a default container is used.
*@li shared_name: Optional. A string. Defaults to "". If non-empty, this
queue will be shared under the given name across multiple sessions. \n

*@par Outputs:
*handle: A Tensor of type mutable resource. The handle to a queue. \n

*@par Third-party framework compatibility
*Compatible with tensorflow FIFOQueue operator.
*/
REG_OP(FIFOQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(FIFOQueue)
/**
*@brief Enqueues a tuple of one or more tensors in the given queue. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*@li handle: A Tensor of type mutable resource. The handle to a queue.
*@li components: A list of Tensor objects. One or more tensors from which
the enqueued tensors should be taken. It's a dynamic input. \n

*@par Attributes:
*timeout_ms: An optional int. Defaults to -1. If the queue is full, this
operation will block for up to timeout_ms milliseconds. Note: This option
is not supported yet. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueEnqueue operator.
*/
REG_OP(QueueEnqueue)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_INPUT(components, TensorType({DT_FLOAT, DT_FLOAT16, \
        DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, \
        DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE, DT_RESOURCE, \
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .ATTR(timeout_ms, Int, -1)
    .OP_END_FACTORY_REG(QueueEnqueue)
/**
*@brief Enqueues zero or more tuples of one or more tensors in the given
queue. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*@li handle: A Tensor of type mutable resource. The handle to a queue.
*@li components: A list of Tensor objects. One or more tensors from which
the enqueued tensors should be taken. It's a dynamic input. \n

*@par Attributes:
*timeout_ms: An optional int. Defaults to -1. If the queue is full, this
operation will block for up to timeout_ms milliseconds. Note: This option
is not supported yet. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueEnqueueMany operator.
*/
REG_OP(QueueEnqueueMany)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_INPUT(components, TensorType({DT_FLOAT, DT_FLOAT16, \
        DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, \
        DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE, DT_RESOURCE, \
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .ATTR(timeout_ms, Int, -1)
    .OP_END_FACTORY_REG(QueueEnqueueMany)
/**
*@brief Dequeues a tuple of one or more tensors from the given queue.
(This op has no "n" input; it returns a single tuple, unlike
QueueDequeueMany.) \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*handle: A Tensor of type mutable resource. The handle to a queue. \n

*@par Attributes:
*@li timeout_ms: An optional int. Defaults to -1. If the queue is empty, this
operation will block for up to timeout_ms milliseconds. Note: This option is
not supported yet.
*@li component_types: A list of DTypes that has length >= 1. The type of each
component in a tuple. \n

*@par Outputs:
*components: A list of Tensor objects of type component_types. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueDequeue operator.
*/
REG_OP(QueueDequeue)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_FLOAT, DT_FLOAT16, \
        DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, \
        DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE, DT_RESOURCE, \
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeue)
/**
*@brief Dequeues n tuples of one or more tensors from the given queue. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*@li handle: A Tensor of type mutable resource. The handle to a queue.
*@li n: A Tensor of type int32. The number of tuples to dequeue. \n

*@par Attributes:
*@li timeout_ms: An optional int. Defaults to -1. If the queue has fewer than
n elements, this operation will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
*@li component_types: A list of DTypes that has length >= 1. The type of each
component in a tuple. \n

*@par Outputs:
*components: A list of Tensor objects of type component_types. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueDequeueMany operator.
*/
REG_OP(QueueDequeueMany)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(n, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_FLOAT, DT_FLOAT16, \
        DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, \
        DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE, DT_RESOURCE, \
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeueMany)
/**
*@brief Dequeues up to n tuples of one or more tensors from the given queue.
Unlike QueueDequeueMany, it may return fewer than n tuples when the queue is
closed with remaining elements. \n

*@par Inputs:
*The input handle must have the resource type. Inputs include:
*@li handle: A Tensor of type mutable resource. The handle to a queue.
*@li n: A Tensor of type int32. The maximum number of tuples to dequeue. \n

*@par Attributes:
*@li timeout_ms: An optional int. Defaults to -1. If the queue has fewer than
n elements, this operation will block for up to timeout_ms milliseconds.
Note: This option is not supported yet.
*@li component_types: A list of DTypes that has length >= 1. The type of each
component in a tuple. \n

*@par Outputs:
*components: A list of Tensor objects of type component_types. \n

*@par Third-party framework compatibility
*Compatible with tensorflow QueueDequeueUpTo operator.
*/
REG_OP(QueueDequeueUpTo)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(n, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_FLOAT, DT_FLOAT16, \
        DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, \
        DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE, DT_RESOURCE, \
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeueUpTo)
/**
*@brief Stage values similar to a lightweight Enqueue. \n

*@par Inputs:
*The input values must be a list of Tensor objects. Inputs include:
*values: A list of Tensor objects. A list of data types that inserted values
should adhere to. It's a dynamic input. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to 0. Maximum number of
elements in the Staging Area. If > 0, inserts on the container will block
when the capacity is reached.
*@li memory_limit: An optional int that is >= 0. Defaults to 0. The maximum
number of bytes allowed for Tensors in the Staging Area. If > 0, inserts will
block until sufficient space is available.
*@li container: An optional string. Defaults to "". If non-empty, this queue
is placed in the given container. Otherwise, a default container is used.
*@li shared_name: An optional string. Defaults to "". It is necessary to
match this name to the matching Unstage Op. \n

*@see Unstage

*@par Third-party framework compatibility
*Compatible with tensorflow Stage operator.
*/
REG_OP(Stage)
    .DYNAMIC_INPUT(values, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(Stage)
/**
*@brief Op removes all elements in the underlying container. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to 0.
*@li memory_limit: An optional int that is >= 0. Defaults to 0.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "".
*@li dtypes: A list of DTypes. \n

*@see Stage

*@par Third-party framework compatibility
*Compatible with tensorflow StageClear operator.
*/
REG_OP(StageClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StageClear)
/**
*@brief Op peeks at the values at the specified index. If the underlying
container does not contain sufficient elements this op will block until
it does. \n

*@par Inputs:
*The input index must be type int32. Inputs include:
*index: A Tensor of type int32. The position to peek at. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to 0.
*@li memory_limit: An optional int that is >= 0. Defaults to 0.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "".
*@li dtypes: A list of DTypes that has length >= 1. \n

*@par Outputs:
*y: A list of Tensor objects of type dtypes. \n

*@par Third-party framework compatibility
*Compatible with tensorflow StagePeek operator.
*/
REG_OP(StagePeek)
    .INPUT(index, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StagePeek)
/**
*@brief Op returns the number of elements in the underlying container. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to 0.
*@li memory_limit: An optional int that is >= 0. Defaults to 0.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "".
*@li dtypes: A list of DTypes that has length >= 1. \n

*@par Outputs:
*size: A Tensor of type int32. \n

*@par Third-party framework compatibility
*Compatible with tensorflow StageSize operator.
*/
REG_OP(StageSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StageSize)
/**
*@brief Pop the element at the top of the stack. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*handle: A Tensor of type resource. The handle to a stack. \n

*@par Attributes:
*elem_type: Required. A DType. The type of the elem that is popped. \n

*@par Outputs:
*element: A Tensor of type elem_type. \n

*@par Third-party framework compatibility
*Compatible with tensorflow StackPop operator.
*/
REG_OP(StackPop)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(element, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .REQUIRED_ATTR(elem_type, Type)
    .OP_END_FACTORY_REG(StackPop)
  326. /**
  327. *@brief Push an element onto the stack. \n
  328. *@par Inputs:
  329. *The input handle must be type resource. Inputs include:
  330. *@li handle: A Tensor of type resource. The handle to a stack.
  331. *@li elem: A Tensor. The tensor to be pushed onto the stack. \n
  332. *@par Attributes:
  333. *swap_memory: An optional bool. Defaults to False. Swap elem to CPU. Default
  334. to false. \n
  335. *@par Outputs:
  336. *y:A Tensor. Has the same type as elem. \n
  337. *@par Third-party framework compatibility
  338. *Compatible with tensorflow StackPush operator.
  339. */
  340. REG_OP(StackPush)
  341. .INPUT(handle, TensorType({DT_RESOURCE}))
  342. .INPUT(element, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
  343. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
  344. DT_DOUBLE, DT_UINT32, DT_UINT64}))
  345. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
  346. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
  347. DT_DOUBLE, DT_UINT32, DT_UINT64}))
  348. .ATTR(swap_memory, Bool, false)
  349. .OP_END_FACTORY_REG(StackPush)
/**
*@brief Close the stack. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*handle: A Tensor of type resource. The handle to a stack. \n

*@par Third-party framework compatibility
*Compatible with tensorflow StackClose operator.
*/
REG_OP(StackClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OP_END_FACTORY_REG(StackClose)
/**
*@brief Create a stack. \n

*@par Inputs:
*The input max_size must be type int32. Inputs include:
*max_size: A Tensor of type int32. The number of elements of a stack. \n

*@par Attributes:
*@li stack_name: An optional string. Defaults to "".
*@li elem_type: Required. The elements type of the created Stack. \n

*@par Outputs:
*handle: A Tensor of type resource. The handle to a stack. \n

*@par Third-party framework compatibility
*Compatible with tensorflow Stack operator.
*/
REG_OP(Stack)
    .INPUT(max_size, TensorType({DT_INT32}))
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(stack_name, String, "")
    .REQUIRED_ATTR(elem_type, Type)
    .OP_END_FACTORY_REG(Stack)
/**
*@brief Partitions "x" into "num_partitions" tensors using indices from
"partitions". \n

*@par Inputs:
*Including:
* @li x: The Tensor to be sliced. Must be one of the following types:
DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING.
* @li partitions: A Tensor of type DT_INT32, with any shape. The indices. \n

*@par Attributes:
*num_partitions: The number of partitions to output. Defaults to 1. \n

*@par Outputs:
*y: A dynamic list of tensors. Has the same type as "x". \n

*@attention Constraints:
*DynamicPartition runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator DynamicPartition. \n

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(DynamicPartition)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(partitions, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .ATTR(num_partitions, Int, 1)
    .OP_END_FACTORY_REG(DynamicPartition)
/**
*@brief Interleaves the values from the "x" tensors into a single tensor. \n

*@par Inputs:
*Including:
* @li indices: A list of at least 1 Tensor objects with type DT_INT32.
It's a dynamic input.
* @li x: A list with the same length as "indices" of Tensor objects.
Must be one of the following types: DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_QINT32,
DT_QUINT8, DT_QINT8, DT_STRING, DT_COMPLEX64, DT_COMPLEX128. It's a dynamic
input. \n

*@par Attributes:
*N: An int that is >= 1. Defaults to "1". \n

*@par Outputs:
*y: A Tensor. Has the same type as "x". \n

*@attention Constraints:
*DynamicStitch runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator DynamicStitch. \n

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(DynamicStitch)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_QINT32, DT_QUINT8, DT_QINT8, DT_STRING, DT_COMPLEX64, \
        DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_QINT32, DT_QUINT8, DT_QINT8, DT_STRING, DT_COMPLEX64, \
        DT_COMPLEX128}))
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(DynamicStitch)
/**
*@brief Interleaves the values from the "x" tensors into a single tensor. \n

*@par Inputs:
*Including:
* @li indices: A list of at least 1 Tensor objects with type DT_INT32. It's a dynamic input.
* @li x: A list with the same length as "indices" of Tensor objects. It's a dynamic input.
Must be one of the following types: DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING,
DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT32. \n

*@par Attributes:
*N: An int that is >= 1. Defaults to "1". The number of "indices"/"x" tensor pairs. \n

*@par Outputs:
*y: A Tensor. Has the same type as "x". \n

*@attention Constraints:
*ParallelDynamicStitch runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ParallelDynamicStitch.
*/
REG_OP(ParallelDynamicStitch)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x,
        TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, \
        DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT32 }))
    .OUTPUT(y,
        TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, \
        DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT32 }))
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ParallelDynamicStitch)
/**
*@brief Removes all elements in the underlying container. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@attention Constraints:
*MapClear runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapClear.
*/
REG_OP(MapClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapClear)
/**
*@brief Returns the number of incomplete elements in the underlying container. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*size: A Tensor of type DT_INT32. The number of incomplete elements. \n

*@attention Constraints:
*MapIncompleteSize runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapIncompleteSize.
*/
REG_OP(MapIncompleteSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapIncompleteSize)
/**
*@brief Unstage Op is similar to a lightweight Dequeue. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to 0.
*@li memory_limit: An optional int that is >= 0. Defaults to 0.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "".
*@li dtypes: A list of DTypes that has length >= 1. \n

*@par Outputs:
*y: A list of Tensor objects of the types specified in "dtypes". \n

*@par Third-party framework compatibility
*Compatible with tensorflow Unstage operator.
*/
REG_OP(Unstage)
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .REQUIRED_ATTR(dtypes, ListType)
    .OP_END_FACTORY_REG(Unstage)
/**
*@brief Stage (key, values) in the underlying container which behaves like a hashtable. \n

*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32.
* @li values: A list of Tensor objects. The inserted values
should adhere to the data types given in "dtypes".
Must be one of the following types: DT_FLOAT, DT_FLOAT16,
DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, DT_UINT64,
DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128,
DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32.
It's a dynamic input. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
Maximum number of elements in the Staging Area. If > 0,
inserts on the container will block when the capacity is reached.
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes.
*@li container: An optional string. Defaults to "".
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
*@li shared_name: An optional string. Defaults to "".
It is necessary to match this name to the matching Unstage Op. \n

*@attention Constraints:
*MapStage runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapStage.
*/
REG_OP(MapStage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
        DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapStage)
/**
*@brief Removes and returns the values associated with the key. \n

*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*values: A list of Tensor objects. Must be one of the following types:
DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, DT_UINT64, DT_RESOURCE,
DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8,
DT_QINT16, DT_QUINT16, DT_QINT32. \n

*@attention Constraints:
*MapUnstage runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapUnstage.
*/
REG_OP(MapUnstage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
        DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapUnstage)
/**
*@brief Removes and returns a random (key, value) pair. \n

*@par Inputs:
*Including:
*indices: A Tensor of type DT_INT32. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*@li key: A Tensor of type DT_INT64.
*@li values: A list of Tensor objects.
Must be one of the following types: DT_FLOAT, DT_FLOAT16, DT_INT8,
DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE,
DT_UINT32, DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128,
DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32. \n

*@attention Constraints:
*MapUnstageNoKey runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapUnstageNoKey.
*/
REG_OP(MapUnstageNoKey)
    .INPUT(indices, TensorType({DT_INT32}))
    .OUTPUT(key, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
        DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapUnstageNoKey)
/**
*@brief Peeks at the values at the specified key without removing them. \n

*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*values: A list of Tensor objects of type "dtypes".
Must be one of the following types: DT_FLOAT, DT_FLOAT16, DT_INT8,
DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
DT_DOUBLE, DT_UINT32, DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64,
DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32. \n

*@attention Constraints:
*MapPeek runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapPeek.
*/
REG_OP(MapPeek)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
        DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
        DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapPeek)
/**
*@brief Returns the number of elements in the underlying container. \n

*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*size: A Tensor of type DT_INT32. \n

*@attention Constraints:
*MapSize runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator MapSize.
*/
REG_OP(MapSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapSize)
/**
*@brief Class wrapping dynamic-sized, per-time-step, write-once Tensor arrays. \n

*@par Inputs:
*The input size must be type int32. Inputs include:
*@li size: int32 scalar Tensor: the size of the TensorArray. Required if
handle is not provided. \n

*@par Attributes:
*@li dtype: The data type of this TensorArray.
*@li element_shape: The TensorShape of elements in this TensorArray.
*@li dynamic_size: A boolean that determines whether writes to the
TensorArray are allowed to grow the size.
*@li clear_after_read: Boolean (optional, default: True). If True, clear
TensorArray values
after reading them. This disables read-many semantics, but allows early
release of memory.
*@li identical_element_shapes: If true (default is false), then all elements
in the TensorArray will be expected to have identical shapes.
*@li tensor_array_name: String: the name of the TensorArray. \n

*@par Outputs:
*@li handle: The handle to the TensorArray.
*@li flow: A scalar used to control gradient flow. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArray operator.
*/
REG_OP(TensorArray)
    .INPUT(size, TensorType({DT_INT32}))
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(flow, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK)
    .ATTR(dynamic_size, Bool, false)
    .ATTR(clear_after_read, Bool, true)
    .ATTR(identical_element_shapes, Bool, false)
    .ATTR(tensor_array_name, String, "")
    .OP_END_FACTORY_REG(TensorArray)
/**
*@brief Deletes the TensorArray from its resource container. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*handle: A Tensor of type resource. The handle to a TensorArray
(output of TensorArray or TensorArrayGrad). \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayClose operator.
*/
REG_OP(TensorArrayClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OP_END_FACTORY_REG(TensorArrayClose)
/**
*@brief Concatenates the elements from the TensorArray into one value. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Attributes:
*@li dtype: The type of the elem that is returned.
*@li element_shape_except0: The expected shape of an element, if known,
excluding the first dimension. \n

*@par Outputs:
*@li value: All of the elements in the TensorArray, concatenated along
the first axis.
*@li lengths: A vector of the row sizes of the original T elements in the
value output. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayConcat operator.
*/
REG_OP(TensorArrayConcat)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8,
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8,
        DT_QUINT8, DT_QINT32}))
    .OUTPUT(lengths, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_RANK)
    .OP_END_FACTORY_REG(TensorArrayConcat)
/**
*@brief Gathers specific elements from the TensorArray into one output.
All elements selected by indices must have the same shape. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li indices: The locations in the TensorArray from which to read tensor
elements.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Attributes:
*@li dtype: The type of the elem that is returned.
*@li element_shape: The expected shape of an element, if known. Used to
validate the shapes of TensorArray elements. If this shape is not fully
specified, gathering zero-size TensorArrays is an error. \n

*@par Outputs:
*value: All of the elements in the TensorArray, concatenated along a new
axis (the new dimension 0). \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayGather operator.
*/
REG_OP(TensorArrayGather)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(indices, TensorType({DT_INT32}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8,
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8,
        DT_QUINT8, DT_QINT32}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape, ListInt, ge::UNKNOWN_RANK)
    .OP_END_FACTORY_REG(TensorArrayGather)
/**
*@brief Creates a TensorArray for storing the gradients of values in the
given handle. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Attributes:
*source: The gradient source string, used to decide which gradient
TensorArray to return. \n

*@par Outputs:
*@li grad_handle: A Tensor of type resource.
*@li flow_out: A Tensor of type float. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayGrad operator.
*/
REG_OP(TensorArrayGrad)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(grad_handle, TensorType({DT_RESOURCE}))
    .OUTPUT(flow_out, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(source, String)
    .OP_END_FACTORY_REG(TensorArrayGrad)
/**
*@brief Pushes an element onto the tensor_array. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li index: The position to write to inside the TensorArray.
*@li value: The tensor to write to the TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Outputs:
*flow_out: A float scalar that enforces proper chaining of operations. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayWrite operator.
*/
REG_OP(TensorArrayWrite)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(index, TensorType({DT_INT32}))
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8,
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(flow_out, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(TensorArrayWrite)
/**
*@brief Creates a TensorArray for storing multiple gradients of values in
the given handle. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: A Tensor of type resource. The handle to the forward TensorArray.
*@li flow_in: A Tensor of type float. A float scalar that enforces proper
chaining of operations.
*@li shape_to_prepend: A Tensor of type int32. An int32 vector representing
a shape. \n

*@par Attributes:
*source: A string. The gradient source string, used to decide which gradient
TensorArray to return. \n

*@par Outputs:
*@li grad_handle: A Tensor of type resource.
*@li flow_out: A Tensor of type float. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayGradWithShape operator.
*/
REG_OP(TensorArrayGradWithShape)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .INPUT(shape_to_prepend, TensorType({ DT_INT32 }))
    .OUTPUT(grad_handle, TensorType({ DT_RESOURCE }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .ATTR(source, String, "")
    .OP_END_FACTORY_REG(TensorArrayGradWithShape)
/**
*@brief Reads an element from the TensorArray into the output value. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: A Tensor of type resource. The handle to a TensorArray.
*@li index: A Tensor of type int32.
*@li flow_in: A Tensor of type float. \n

*@par Attributes:
*dtype: A DType. The type of the element that is returned. \n

*@par Outputs:
*y: A Tensor of type dtype. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayRead operator.
*/
REG_OP(TensorArrayRead)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(index, TensorType({ DT_INT32 }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(TensorArrayRead)
/**
*@brief Scatters the data from the input value into specific TensorArray
elements. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li indices: The locations at which to write the tensor elements.
*@li value: The concatenated tensor to write to the TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Outputs:
*flow_out: A float scalar that enforces proper chaining of operations. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArrayScatter operator.
*/
REG_OP(TensorArrayScatter)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(indices, TensorType({ DT_INT32 }))
    .INPUT(value, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128 }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .OP_END_FACTORY_REG(TensorArrayScatter)
/**
*@brief Splits the data from the input value into TensorArray elements. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li value: The concatenated tensor to write to the TensorArray.
*@li lengths: The vector of lengths, how to split the rows of value into
the TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Outputs:
*flow_out: A float scalar that enforces proper chaining of operations. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArraySplit operator.
*/
REG_OP(TensorArraySplit)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(value, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE,
        DT_STRING, DT_COMPLEX64, DT_COMPLEX128 }))
    .INPUT(lengths, TensorType({ DT_INT64 }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .OP_END_FACTORY_REG(TensorArraySplit)
/**
*@brief Returns the number of elements in a TensorArray. \n

*@par Inputs:
*The input handle must be type resource. Inputs include:
*@li handle: The handle to a TensorArray.
*@li flow_in: A float scalar that enforces proper chaining of operations. \n

*@par Outputs:
*size: The number of elements in a TensorArray. \n

*@par Third-party framework compatibility
*Compatible with tensorflow TensorArraySize operator.
*/
REG_OP(TensorArraySize)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(size, TensorType({ DT_INT32 }))
    .OP_END_FACTORY_REG(TensorArraySize)
/**
*@brief A queue implementation that dequeues elements in a random order. \n

*@par Attributes:
*@li component_types: A list of fully-defined TensorType objects with
the same length as shapes, or None.
*@li shapes: (Optional.) A list of fully-defined TensorShape objects with
the same length as dtypes, or None.
*@li capacity: An integer. The upper bound on the number of elements that may
be stored in this queue.
*@li min_after_dequeue: An integer. The minimum number of elements to keep
in the queue after a dequeue, used to ensure a minimum level of mixing.
*@li seed: An integer. Used to create a random seed.
*@li seed2: An integer. Used to create a random seed.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n

*@par Outputs:
*handle: A Tensor of type resource. The handle to the queue. \n

*@par Third-party framework compatibility
*Compatible with tensorflow RandomShuffleQueue operator.
*/
REG_OP(RandomShuffleQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(min_after_dequeue, Int, 0)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(RandomShuffleQueue)
/**
*@brief A queue that produces elements in first-in first-out order. \n

*@par Attributes:
*@li shapes: An optional list of shapes for each component of
a queue element. Defaults to {}. The length of this attr must be
either 0 or the same as the length of "component_types". Shapes of fixed
rank but variable size are allowed by setting any shape dimension to "-1".
In this case, the inputs' shape may vary along the given dimension,
and DequeueMany will pad the given dimension with zeros up to the maximum
shape of all elements in the given batch. If the length of this attr is "0",
different queue elements may have different ranks and shapes, but only one
element may be dequeued at a time.
*@li capacity: An optional int. Defaults to "-1". The upper bound on the number
of elements in this queue. Negative numbers mean no limit.
*@li container: An optional string. Defaults to "". If non-empty, this queue
is placed in the given container. Otherwise, a default container is used.
*@li shared_name: An optional string. Defaults to "". If non-empty, this queue
will be shared under the given name across multiple sessions. \n

*@par Outputs:
*handle: A Tensor of type DT_RESOURCE. \n

*@attention Constraints:
*PaddingFIFOQueue runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PaddingFIFOQueue.
*/
REG_OP(PaddingFIFOQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(PaddingFIFOQueue)
/**
*@brief A queue that produces elements sorted by the first component value. \n

*@par Attributes:
*@li component_types: An optional list of tf.DTypes. Defaults to {}.
The type of each component in a value.
*@li shapes: A list of shapes for each component of a queue element.
The length of this attr must be either 0 or the same as the length of
"component_types". If the length of this attr is 0, the shapes of queue
elements are not constrained, and only one element may be dequeued at a time.
*@li container: An optional string. Defaults to "". If non-empty, this queue
is placed in the given container. Otherwise, a default container is used.
*@li capacity: An integer. The upper bound on the number of elements that may be stored in this queue.
*@li shared_name: An optional string. Defaults to "". If non-empty, this
queue will be shared under the given name across multiple sessions. \n

*@par Outputs:
*handle: A Tensor of type DT_RESOURCE. \n

*@attention Constraints:
*PriorityQueue runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator PriorityQueue.
*/
REG_OP(PriorityQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(component_types, ListType, {})
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(PriorityQueue)
/**
*@brief Closes the given queue. Subsequent enqueue operations will fail;
subsequent dequeue operations will continue to succeed while elements remain. \n

*@par Inputs:
*Including:
*handle: A Tensor of type DT_RESOURCE. The handle to a queue. \n

*@par Attributes:
*cancel_pending_enqueues: An optional bool. Defaults to "False".
If true, all pending enqueue requests that are blocked on
the given queue will be canceled. \n

*@attention Constraints:
*QueueClose runs on the Ascend AI CPU, which delivers poor performance.

*@par Third-party framework compatibility
*Compatible with the TensorFlow operator QueueClose.
*/
REG_OP(QueueClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(cancel_pending_enqueues, Bool, false)
    .OP_END_FACTORY_REG(QueueClose)
/**
*@brief Stage (key, values) in the underlying container which behaves like an ordered associative container. \n
*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32.
* @li values: A list of tensors. Must be one of the following types:
DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, DT_UINT64,
DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, DT_QINT8,
DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 that inserted
values should adhere to. It's a dynamic input. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
Maximum number of elements in the Staging Area.
If > 0, inserts on the container will block
when the capacity is reached.
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes.
*@li container: An optional string. Defaults to "".
If non-empty, this queue is placed in the given container.
Otherwise, a default container is used.
*@li shared_name: An optional string. Defaults to "".
It is necessary to match this name to the matching Unstage Op. \n
*@attention Constraints:
*OrderedMapStage runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapStage.
*/
REG_OP(OrderedMapStage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
            DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
            DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapStage)
/**
*@brief Returns the number of elements in the underlying container. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@par Outputs:
*size: A Tensor of type DT_INT32. The number of elements in the underlying container. \n
*@attention Constraints:
*OrderedMapSize runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapSize.
*/
REG_OP(OrderedMapSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapSize)
/**
*@brief Removes all elements in the underlying container. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@attention Constraints:
*OrderedMapClear runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapClear.
*/
// Attribute-only op: the target container is identified by the
// container/shared_name attributes; no tensor inputs or outputs.
REG_OP(OrderedMapClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapClear)
  1175. /**
  1176. *@brief FakeQueue, support tf api FixedLengthRecordReader. \n
  1177. *@par Inputs:
  1178. *Including:
  1179. * resource: A Tensor of type DT_RESOURCE.
  1180. *@par Outputs:
  1181. *handle: A Tensor of type DT_STRING ref. \n
  1182. *@par Third-party framework compatibility
  1183. *Compatible with the TensorFlow operator FakeQueue.
  1184. */
  1185. REG_OP(FakeQueue)
  1186. .INPUT(resource, TensorType({DT_RESOURCE}))
  1187. .OUTPUT(handle, TensorType({DT_STRING}))
  1188. .OP_END_FACTORY_REG(FakeQueue)
/**
*@brief Returns the number of incomplete elements in the underlying container. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@par Outputs:
*size: A Tensor of type DT_INT32. The number of incomplete elements. \n
*@attention Constraints:
*OrderedMapIncompleteSize runs on the Ascend AI CPU,
which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapIncompleteSize.
*/
REG_OP(OrderedMapIncompleteSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapIncompleteSize)
/**
*@brief Peeks at the values at the specified key, without removing them
from the underlying container. \n
*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@par Outputs:
*values: A list of Tensor objects. Must be one of the following types:
DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, DT_UINT64, DT_RESOURCE, DT_STRING,
DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32. \n
*@attention Constraints:
*OrderedMapPeek runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapPeek.
*/
REG_OP(OrderedMapPeek)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
            DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
            DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapPeek)
/**
*@brief Removes and returns the (key, value) element with the smallest key
from the underlying container. \n
*@par Inputs:
*Including:
* indices: A Tensor of type DT_INT32. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@par Outputs:
*@li key: A Tensor of type DT_INT64.
*@li values: A list of Tensor objects. Must be one of the following types:
DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8, DT_INT32,
DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, DT_UINT64, DT_RESOURCE, DT_STRING,
DT_COMPLEX64, DT_COMPLEX128, DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32. \n
*@attention Constraints:
*OrderedMapUnstageNoKey runs on the Ascend AI CPU,
which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapUnstageNoKey.
*/
REG_OP(OrderedMapUnstageNoKey)
    .INPUT(indices, TensorType({DT_INT32}))
    .OUTPUT(key, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(values,
        TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_UINT32, \
            DT_UINT64, DT_RESOURCE, DT_STRING, DT_COMPLEX64, DT_COMPLEX128, \
            DT_QINT8, DT_QUINT8, DT_QINT16, DT_QUINT16, DT_QINT32 }))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapUnstageNoKey)
/**
*@brief Removes and returns the values associated with the key. \n
*@par Inputs:
*Including:
* @li key: A Tensor of type DT_INT64.
* @li indices: A Tensor of type DT_INT32. \n
*@par Attributes:
*@li capacity: An optional int that is >= 0. Defaults to "0".
*@li memory_limit: An optional int that is >= 0. Defaults to "0".
*@li dtypes: A list of tf.DTypes that has length >= 1.
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "". \n
*@par Outputs:
*values: A list of Tensor objects. Must be one of the following types:
DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT,
DT_FLOAT16, DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64. \n
*@attention Constraints:
*OrderedMapUnstage runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator OrderedMapUnstage.
*/
REG_OP(OrderedMapUnstage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
                                        DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16,
                                        DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapUnstage)
/**
*@brief A barrier represents a key-value map, where each key is a string,
and each value is a tuple of tensors. \n
*@par Attributes:
*@li component_types: The type of each component in a value.
*@li shapes: A list of shapes for each component of a queue element.
Each shape must be 1 in the first dimension.
The length of this attr must be the same as
the length of "component_types".
*@li capacity: The capacity of the barrier.
The default capacity is MAX_INT32,
which is the largest capacity of the underlying queue.
*@li container: If non-empty, this barrier is placed in the given container.
Otherwise, a default container is used.
*@li shared_name: If non-empty, this barrier will be shared under
the given name across multiple sessions. \n
*@par Outputs:
*handle: A Tensor of type DT_STRING_REF. The handle to the barrier. \n
*@attention Constraints:
*Barrier runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Barrier. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(Barrier)
    .OUTPUT(handle, TensorType({DT_STRING_REF}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)        // -1: use the documented default (MAX_INT32)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(Barrier)
/**
*@brief For each key, assigns the respective value to the specified component. \n
*@par Inputs:
*Including:
* @li handle: A Tensor of type DT_STRING_REF. The handle to a barrier.
* @li keys: A Tensor of type DT_STRING. A 1D tensor of keys.
* @li values: An any-dimensional tensor of values, which are associated
with the respective keys. The 0th dimension must have length n,
where n is the number of keys.
Must be one of the following types: DT_FLOAT, DT_FLOAT16, DT_INT8,
DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL,
DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING. \n
*@par Attributes:
*component_index: The component of the barrier elements that is being assigned. \n
*@attention Constraints:
*BarrierInsertMany runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BarrierInsertMany. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(BarrierInsertMany)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(keys, TensorType({DT_STRING}))
    .INPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .REQUIRED_ATTR(component_index, Int)
    .OP_END_FACTORY_REG(BarrierInsertMany)
  1381. /**
  1382. *@brief Takes the given number of completed elements from a barrier. \n
  1383. *@par Inputs:
  1384. *Including:
  1385. * @li handle: A Tensor of type DT_STRING_REF. The handle to a barrier.
  1386. * @li num_elements: A Tensor of type DT_INT32.
  1387. A single-element tensor containing the number of elements to take. \n
  1388. *@par Attributes:
  1389. *@li component_types: The type of each component in a value.
  1390. *@li allow_small_batch: Allow to return less than "num_elements"
  1391. items if barrier is already closed.
  1392. *@li wait_for_incomplete: An any-dimensional tensor
  1393. for each component in the barrier element.
  1394. *@li timeout_ms: If the queue is empty, this operation will block for up to
  1395. "timeout_ms" milliseconds. Note: This option is not supported yet. \n
  1396. *@par Outputs:
  1397. *@li indices: A 1D tensor of type DT_INT64. The indices, with length "num_elems".
  1398. These indices refer to the batch in which the values were
  1399. placed into the barrier.
  1400. *@li keys: A 1D tensor of keys,
  1401. with length "num_elements" of type DT_STRING.
  1402. *@li values: A 1D tensor per component in a barrier element.
  1403. All values have length "num_elements" along the 0th dimension.
  1404. Must be one of the following types:
  1405. DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, DT_UINT8,
  1406. DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128,
  1407. DT_RESOURCE, DT_STRING. \n
  1408. *@attention Constraints:
  1409. *BarrierTakeMany runs on the Ascend AI CPU, which delivers poor performance.
  1410. *@par Third-party framework compatibility
  1411. *Compatible with the TensorFlow operator BarrierTakeMany. \n
  1412. *@par Restrictions:
  1413. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1414. */
  1415. REG_OP(BarrierTakeMany)
  1416. .INPUT(handle, TensorType({DT_STRING_REF}))
  1417. .INPUT(num_elements, TensorType(DT_INT32))
  1418. .OUTPUT(indices, TensorType({DT_INT64}))
  1419. .OUTPUT(keys, TensorType({DT_STRING}))
  1420. .DYNAMIC_OUTPUT(values,
  1421. TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  1422. DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  1423. DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
  1424. .REQUIRED_ATTR(component_types, ListType)
  1425. .ATTR(allow_small_batch, Bool, false)
  1426. .ATTR(wait_for_incomplete, Bool, false)
  1427. .ATTR(timeout_ms, Int, -1)
  1428. .OP_END_FACTORY_REG(BarrierTakeMany)
/**
*@brief Closes the given barrier. \n
*@par Inputs:
*Including:
*handle: A Tensor of type DT_STRING_REF. The handle to a barrier. \n
*@par Attributes:
*cancel_pending_enqueues: If true, all pending enqueue requests
that are blocked on the barrier's queue will
be canceled. InsertMany will fail,
even if no new key is introduced. \n
*@attention Constraints:
*BarrierClose runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BarrierClose. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(BarrierClose)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .ATTR(cancel_pending_enqueues, Bool, false)
    .OP_END_FACTORY_REG(BarrierClose)
  1450. /**
  1451. *@brief Computes the number of complete elements in the given barrier. \n
  1452. *@par Inputs:
  1453. *Including:
  1454. *handle: A Tensor of type DT_STRING_REF. The handle to a barrier. \n
  1455. *@par Outputs:
  1456. *size: A Tensor of type DT_INT32. The number of complete elements. \n
  1457. *@attention Constraints:
  1458. *BarrierReadySize runs on the Ascend AI CPU, which delivers poor performance.
  1459. *@par Third-party framework compatibility
  1460. *Compatible with the TensorFlow operator BarrierReadySize. \n
  1461. *@par Restrictions:
  1462. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1463. */
  1464. REG_OP(BarrierReadySize)
  1465. .INPUT(handle, TensorType({DT_STRING_REF}))
  1466. .OUTPUT(size, TensorType(DT_INT32))
  1467. .OP_END_FACTORY_REG(BarrierReadySize)
  1468. /**
  1469. *@brief Computes the number of incomplete elements in the given barrier. \n
  1470. *@par Inputs:
  1471. *Including:
  1472. *handle: A Tensor of type DT_STRING_REF. The handle to a barrier. \n
  1473. *@par Outputs:
  1474. *size: A Tensor of type DT_INT32. The number of incomplete elements in the barrier. \n
  1475. *@attention Constraints:
  1476. *BarrierIncompleteSize runs on the Ascend AI CPU, which delivers poor performance.
  1477. *@par Third-party framework compatibility
  1478. *Compatible with the TensorFlow operator BarrierIncompleteSize. \n
  1479. *@par Restrictions:
  1480. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1481. */
  1482. REG_OP(BarrierIncompleteSize)
  1483. .INPUT(handle, TensorType({DT_STRING_REF}))
  1484. .OUTPUT(size, TensorType(DT_INT32))
  1485. .OP_END_FACTORY_REG(BarrierIncompleteSize)
  1486. /**
  1487. *@brief Emits randomized records. \n
  1488. *@par Attributes:
  1489. *@li file_pattern: A string. Glob pattern for the data files.
  1490. *@li file_random_seed: An optional int. Defaults to 301. Random seeds used to
  1491. produce randomized records.
  1492. *@li file_shuffle_shift_ratio: An optional float. Defaults to 0. Shifts the
  1493. list of files after the list is randomly shuffled.
  1494. *@li file_buffer_size: An optional int. Defaults to 10000. The randomization
  1495. shuffling buffer.
  1496. *@li file_parallelism: An optional int. Defaults to 16. How many sstables are
  1497. opened and concurrently iterated over.
  1498. *@li batch_size: An optional int. Defaults to 32. The batch size.
  1499. *@li compression_type: An optional string. Defaults to "". The type of
  1500. compression for the file. Currently ZLIB and GZIP are supported. \n
  1501. *@par Outputs:
  1502. *records: A Tensor of type string. \n
  1503. *@par Third-party framework compatibility
  1504. *Compatible with tensorflow RecordInput operator.
  1505. */
  1506. REG_OP(RecordInput)
  1507. .OUTPUT(records, TensorType({DT_STRING}))
  1508. .REQUIRED_ATTR(file_pattern, String)
  1509. .ATTR(file_random_seed, Int, 301)
  1510. .ATTR(file_shuffle_shift_ratio, Float, 0)
  1511. .ATTR(file_buffer_size, Int, 10000)
  1512. .ATTR(file_parallelism, Int, 16)
  1513. .ATTR(batch_size, Int, 32)
  1514. .ATTR(compression_type, String, "")
  1515. .OP_END_FACTORY_REG(RecordInput)
  1516. /**
  1517. *@brief A conditional accumulator for aggregating gradients. \n
  1518. *@par Attributes:
  1519. *@li dtype: The type of the value being accumulated.
  1520. *@li shape: The shape of the values, can be [], in which case shape is unknown.
  1521. *@li container: If non-empty, this accumulator is placed in the given container.
  1522. Otherwise, a default container is used.
  1523. *@li shared_name: If non-empty, this accumulator will be shared under the given
  1524. name across multiple sessions.
  1525. *@li reduction_type: reduction operator type, default "MEAN". \n
  1526. *@par Outputs:
  1527. *handle: A Tensor of type DT_STRING_REF. The handle to the accumulator. \n
  1528. *@attention Constraints:
  1529. *ConditionalAccumulator runs on the Ascend AI CPU, which delivers poor performance.
  1530. *@par Third-party framework compatibility
  1531. *Compatible with the TensorFlow operator ConditionalAccumulator. \n
  1532. *@par Restrictions:
  1533. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1534. */
  1535. REG_OP(ConditionalAccumulator)
  1536. .OUTPUT(handle, TensorType({DT_STRING_REF}))
  1537. .REQUIRED_ATTR(dtype, Type)
  1538. .REQUIRED_ATTR(shape, ListInt)
  1539. .ATTR(container, String, "")
  1540. .ATTR(shared_name, String, "")
  1541. .ATTR(reduction_type, String, "MEAN")
  1542. .OP_END_FACTORY_REG(ConditionalAccumulator)
/**
*@brief Applies a gradient to a given accumulator.
Does not add if "local_step" is lesser than the accumulator's "global_step". \n
*@par Inputs:
* @li handle: A Tensor of type DT_STRING_REF. The handle to an accumulator.
* @li local_step: A Tensor of type DT_INT64.
The "local_step" value at which the gradient was computed. \n
* @li gradient: A tensor of the gradient to be accumulated.
Must be one of the following types:
DT_FLOAT16, DT_FLOAT, DT_DOUBLE
*@par Attributes:
*dtype: Must be one of the following types:
DT_FLOAT16, DT_FLOAT, DT_DOUBLE
*@attention Constraints:
*AccumulatorApplyGradient runs on the Ascend AI CPU,
which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator AccumulatorApplyGradient. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(AccumulatorApplyGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(local_step, TensorType({DT_INT64}))
    .INPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(AccumulatorApplyGradient)
/**
*@brief Returns the number of gradients aggregated in the given accumulator. \n
*@par Inputs:
*Including:
*handle: A Tensor of type DT_STRING_REF. The handle to an accumulator. \n
*@par Outputs:
*y: A Tensor of type DT_INT32. The number of gradients aggregated
in the given accumulator. \n
*@attention Constraints:
*AccumulatorNumAccumulated runs on the Ascend AI CPU,
which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator AccumulatorNumAccumulated. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(AccumulatorNumAccumulated)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(AccumulatorNumAccumulated)
/**
*@brief Updates the accumulator with a new value for "global_step". \n
*@par Inputs:
*Input "new_global_step" is a scalar.
* @li handle: A Tensor of type DT_STRING_REF. The handle to an accumulator.
* @li new_global_step: A Tensor of type DT_INT64.
The new "global_step" value to set. \n
*@attention Constraints:
*AccumulatorSetGlobalStep runs on the Ascend AI CPU, which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator AccumulatorSetGlobalStep. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(AccumulatorSetGlobalStep)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(new_global_step, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(AccumulatorSetGlobalStep)
/**
*@brief Extracts the average gradient in the given ConditionalAccumulator. \n
*@par Inputs:
* Input "num_required" is a scalar.
* @li handle: A Tensor of type DT_STRING_REF. The handle to an accumulator.
* @li num_required: A Tensor of type DT_INT32.
Number of gradients required before an aggregate is returned. \n
*@par Attributes:
*dtype: The data type of accumulated gradients.
Needs to correspond to the type of the accumulator. \n
*@par Outputs:
*y: The average of the accumulated gradients.
Must be one of the following types:
DT_FLOAT16, DT_FLOAT, DT_DOUBLE. \n
*@attention Constraints:
*AccumulatorTakeGradient runs on the Ascend AI CPU,
which delivers poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator AccumulatorTakeGradient. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(AccumulatorTakeGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(num_required, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(AccumulatorTakeGradient)
/**
*@brief A conditional accumulator for aggregating sparse gradients. \n
*@par Attributes:
*@li shape: The shape of the values.
*@li dtype: The type of the value being accumulated.
*@li container: If non-empty, this accumulator is placed in the given
container. Otherwise, a default container is used.
*@li shared_name: If non-empty, this accumulator will be shared under the
given name across multiple sessions.
*@li reduction_type: The reduction method whose type is string,
default is "MEAN". \n
*@par Outputs:
*handle: The handle to the accumulator. \n
*@par Third-party framework compatibility
*Compatible with tensorflow SparseConditionalAccumulator operator. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(SparseConditionalAccumulator)
    .OUTPUT(handle, TensorType({DT_STRING_REF}))
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(reduction_type, String, "MEAN")
    .OP_END_FACTORY_REG(SparseConditionalAccumulator)
/**
*@brief Applies a sparse gradient to a given accumulator. \n
*@par Inputs:
*The input handle must be type string_ref. Inputs include:
*@li handle: A Tensor of type mutable string. The handle to an accumulator.
*@li local_step: A Tensor of type int64. The local_step value at which the
sparse gradient was computed.
*@li indices: A Tensor of type int64. Indices of the sparse gradient to be
accumulated. Must be a vector.
*@li values: A Tensor. Values are the non-zero slices of the gradient,
and must have the same first dimension as indices, i.e., the nnz represented
by indices and values must be consistent.
*@li shape: A Tensor of type int64. \n
*@par Attributes:
*@li has_known_shape: A bool. Boolean indicating whether gradient_shape is
unknown, in which case the input is ignored during validation.
*@li dtype: The data type of accumulated gradients. Needs to correspond to
the type of the accumulator. \n
*@par Third-party framework compatibility
*Compatible with tensorflow SparseAccumulatorApplyGradient operator. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(SparseAccumulatorApplyGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(local_step, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, DT_UINT32, \
        DT_UINT64, DT_COMPLEX64, DT_COMPLEX128, DT_QINT16, DT_QUINT16, \
        DT_QINT8, DT_QUINT8, DT_QINT32}))
    .INPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(has_known_shape, Bool)
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(SparseAccumulatorApplyGradient)
/**
*@brief Extracts the average sparse gradient in a SparseConditionalAccumulator. \n
*@par Inputs:
*The input handle must be type string_ref. Inputs include:
*@li handle: The handle to a SparseConditionalAccumulator.
*@li num_required: Number of gradients required before we return an aggregate. \n
*@par Attributes:
*dtype: The data type of accumulated gradients. Needs to correspond to the
type of the accumulator. \n
*@par Outputs:
*@li indices: Indices of the average of the accumulated sparse gradients.
*@li values: Values of the average of the accumulated sparse gradients.
*@li shape: Shape of the average of the accumulated sparse gradients. \n
*@par Third-party framework compatibility
*Compatible with tensorflow SparseAccumulatorTakeGradient operator. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(SparseAccumulatorTakeGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(num_required, TensorType({DT_INT32}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    // NOTE(review): the output type list here is narrower than the input list
    // accepted by SparseAccumulatorApplyGradient — confirm this is intended.
    .OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(SparseAccumulatorTakeGradient)
/**
*@brief A conditional accumulator for aggregating gradients. \n
*@par Attributes:
* @li dtype: The type of the value being accumulated.
* @li shape: The shape of the values, can be [], in which case shape is unknown.
* @li container: If non-empty, this accumulator is placed in the given container.
Otherwise, a default container is used.
* @li shared_name: If non-empty, this accumulator will be shared under the given
name across multiple sessions.
* @li reduction_type: Reduction operator type. Defaults to "MEAN". \n
*@par Outputs:
*handle: A Tensor of type DT_RESOURCE. The handle to the accumulator. \n
*@attention Constraints:
*ResourceConditionalAccumulator runs on the Ascend AI CPU, which delivers poor performance. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator ResourceConditionalAccumulator. \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ResourceConditionalAccumulator)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(dtype, Type)
    .REQUIRED_ATTR(shape, ListInt)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(reduction_type, String, "MEAN")
    .OP_END_FACTORY_REG(ResourceConditionalAccumulator)
  1750. /**
  1751. *@brief Applies a gradient to a given accumulator.
  1752. Does not add if "local_step" is less than the accumulator's "global_step". \n
  1753. *@par Inputs:
  1754. * @li handle: The handle to an accumulator.
  1755. * @li local_step: The "local_step" value at which the gradient was computed.
  1756. * @li gradient: A tensor of the gradient to be accumulated.
  1757. Must be one of the following types:
  1758. DT_FLOAT16, DT_FLOAT, DT_DOUBLE. \n
  1759. *@attention Constraints:
  1760. *ResourceAccumulatorApplyGradient runs on the Ascend AI CPU, which delivers poor performance. \n
  1761. *@par Third-party framework compatibility
  1762. *Compatible with the TensorFlow operator ResourceAccumulatorApplyGradient. \n
  1763. *@par Restrictions:
  1764. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1765. */
  1766. REG_OP(ResourceAccumulatorApplyGradient)
  1767. .INPUT(handle, TensorType({DT_RESOURCE}))
  1768. .INPUT(local_step, TensorType({DT_INT64}))
  1769. .INPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  1770. .OP_END_FACTORY_REG(ResourceAccumulatorApplyGradient)
  1771. /**
  1772. *@brief Returns the number of gradients aggregated in the given accumulator. \n
  1773. *@par Inputs:
  1774. *handle: The handle to an accumulator. \n
  1775. *@par Outputs:
  1776. *num_accumulated: The number of gradients aggregated in the given accumulator. \n
  1777. *@attention Constraints:
  1778. *ResourceAccumulatorNumAccumulated runs on the Ascend AI CPU, which delivers poor performance. \n
  1779. *@par Third-party framework compatibility
  1780. *Compatible with the TensorFlow operator ResourceAccumulatorNumAccumulated. \n
  1781. *@par Restrictions:
  1782. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1783. */
  1784. REG_OP(ResourceAccumulatorNumAccumulated)
  1785. .INPUT(handle, TensorType({DT_RESOURCE}))
  1786. .OUTPUT(num_accumulated, TensorType({DT_INT32}))
  1787. .OP_END_FACTORY_REG(ResourceAccumulatorNumAccumulated)
  1788. /**
  1789. *@brief Updates the accumulator with a new value for "global_step". \n
  1790. *@par Inputs:
  1791. * @li handle: The handle to an accumulator.
  1792. * @li new_global_step: The new "global_step" value to set. \n
*This operator produces no outputs. \n
  1793. *@attention Constraints:
  1794. *ResourceAccumulatorSetGlobalStep runs on the Ascend AI CPU, which delivers poor performance. \n
  1795. *@par Third-party framework compatibility
  1796. *Compatible with the TensorFlow operator ResourceAccumulatorSetGlobalStep. \n
  1797. *@par Restrictions:
  1798. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1799. */
  1800. REG_OP(ResourceAccumulatorSetGlobalStep)
  1801. .INPUT(handle, TensorType({DT_RESOURCE}))
  1802. .INPUT(new_global_step, TensorType({DT_INT64}))
  1803. .OP_END_FACTORY_REG(ResourceAccumulatorSetGlobalStep)
  1804. /**
  1805. *@brief Extracts the average gradient in the given ConditionalAccumulator. \n
  1806. *@par Inputs:
  1807. * @li handle: The handle to an accumulator.
  1808. * @li num_required: Number of gradients required before an aggregate is returned. \n
  1809. *@par Attributes:
  1810. *dtype: The data type of accumulated gradients.
  1811. Needs to correspond to the type of the accumulator;
must be one of the output types listed below. \n
  1812. *@par Outputs:
  1813. *average: The average of the accumulated gradients.
  1814. Must be one of the following types:
  1815. DT_FLOAT16, DT_FLOAT, DT_DOUBLE. \n
  1816. *@attention Constraints:
  1817. *ResourceAccumulatorTakeGradient runs on the Ascend AI CPU, which delivers poor performance. \n
  1818. *@par Third-party framework compatibility
  1819. *Compatible with the TensorFlow operator ResourceAccumulatorTakeGradient. \n
  1820. *@par Restrictions:
  1821. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1822. */
  1823. REG_OP(ResourceAccumulatorTakeGradient)
  1824. .INPUT(handle, TensorType({DT_RESOURCE}))
  1825. .INPUT(num_required, TensorType({DT_INT32}))
  1826. .OUTPUT(average, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  1827. .REQUIRED_ATTR(dtype, Type)
  1828. .OP_END_FACTORY_REG(ResourceAccumulatorTakeGradient)
  1829. /**
  1830. *@brief Enqueue a Tensor on the computation outfeed. \n
  1831. *@par Inputs:
  1832. *Inputs include:
  1833. *x: A Tensor. Must be one of the following types: float16, float32,
  1834. float64, int8, int16, uint16, uint8, int32, int64, uint32, uint64,
  1835. bool, double, string. It's a dynamic input. \n
  1836. *@par Attributes:
  1837. *channel_name: name of operator channel, default "". \n
  1838. *@attention Constraints:
  1839. *The implementation for OutfeedEnqueueOp on Ascend uses AICPU, which delivers poor performance.
  1840. *@par Third-party framework compatibility
  1841. *Compatible with the TensorFlow OutfeedEnqueueOp operator.
  1842. */
  1843. REG_OP(OutfeedEnqueueOp)
  1844. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8,
  1845. DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
  1846. DT_UINT64, DT_BOOL, DT_DOUBLE, DT_STRING}))
  1847. .ATTR(channel_name, String, "")
  1848. .OP_END_FACTORY_REG(OutfeedEnqueueOp)
  1849. /**
  1850. *@brief Enqueue a Tensor on the computation outfeed. \n
  1851. *@par Inputs:
  1852. *Inputs include:
  1853. *@li x: A Tensor. Must be one of the following types: float16, float32,
  1854. float64, int8, int16, uint16, uint8, int32, int64, uint32, uint64,
  1855. bool, double, string. It's a dynamic input.
  1856. *@li tensor_name: A Tensor of type string. \n
  1857. *@par Attributes:
  1858. *channel_name: name of operator channel, default "". \n
  1859. *@attention Constraints:
  1860. *The implementation for OutfeedEnqueueOpV2 on Ascend uses AICPU, which delivers poor performance.
  1861. *@par Third-party framework compatibility
  1862. *Compatible with the TensorFlow OutfeedEnqueueOpV2 operator.
  1863. */
  1864. REG_OP(OutfeedEnqueueOpV2)
  1865. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8,
  1866. DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
  1867. DT_UINT64, DT_BOOL, DT_DOUBLE, DT_STRING}))
  1868. .INPUT(tensor_name, TensorType({DT_STRING}))
  1869. .ATTR(channel_name, String, "")
  1870. .OP_END_FACTORY_REG(OutfeedEnqueueOpV2)
  1871. /**
  1872. *@brief LruCache, create cache resource.
  1873. *@par Inputs:
  1874. *No input.
  1875. *@par Attributes:
  1876. *@li cache_size: cache size. An optional "int64". Defaults to "100000".
  1877. *@li load_factor: rate which shows if cache is full. An optional "float". Defaults to "1".
*@li container: container name of the cache resource. An optional "string". Defaults to "".
*@li shared_name: shared name of the cache resource. An optional "string". Defaults to "LruCache".
*@li dtype: data type of the cache. Required.
  1878. *@par Outputs:
  1879. *cache: cache resource.
  1880. *@par Restrictions:
  1881. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1882. */
  1883. REG_OP(LruCache)
  1884. .OUTPUT(cache, TensorType({DT_RESOURCE}))
  1885. .ATTR(container, String, "")
  1886. .ATTR(shared_name, String, "LruCache")
  1887. .ATTR(cache_size, Int, 100000)
  1888. .ATTR(load_factor, Float, 1)
  1889. .REQUIRED_ATTR(dtype, Type)
  1890. .OP_END_FACTORY_REG(LruCache)
  1891. /**
  1892. *@brief CacheAdd, compute which ids are swapped into the cache and which ids are swapped out.
  1893. *@par Inputs:
  1894. *@li cache: cache resource (DT_RESOURCE).
  1895. *@li ids: tensor of ids to insert into the cache.
  1896. *@par Outputs:
  1897. *@li swap_in_id: ids that come into the cache.
  1898. *@li swap_in_idx: cache indices of the ids that come into the cache.
  1899. *@li swap_out_id: ids that get out of the cache.
  1900. *@li swap_out_idx: cache indices of the ids that get out of the cache.
  1901. *@par Restrictions:
  1902. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1903. */
  1904. REG_OP(CacheAdd)
  1905. .INPUT(cache, TensorType({DT_RESOURCE}))
  1906. .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1907. .OUTPUT(swap_in_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1908. .OUTPUT(swap_in_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1909. .OUTPUT(swap_out_id, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1910. .OUTPUT(swap_out_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1911. .OP_END_FACTORY_REG(CacheAdd)
  1912. /**
  1913. *@brief CacheRemoteIndexToLocal, get the local index in the cache for each id.
  1914. *@par Inputs:
  1915. *@li cache: cache resource (DT_RESOURCE).
  1916. *@li ids: tensor of ids to look up in the cache.
  1917. *@par Outputs:
  1918. *local_idx: index of each id in the cache.
  1919. *@par Restrictions:
  1920. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1921. */
  1922. REG_OP(CacheRemoteIndexToLocal)
  1923. .INPUT(cache, TensorType({DT_RESOURCE}))
  1924. .INPUT(ids, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1925. .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1926. .OP_END_FACTORY_REG(CacheRemoteIndexToLocal)
  1927. /**
  1928. *@brief CacheAllIndexToLocal, get all local indices in the cache.
  1929. *@par Inputs:
  1930. *cache: cache resource (DT_RESOURCE).
*@par Outputs:
  1931. *local_idx: indices in the cache.
*@par Attributes:
*dtype: data type of the output. Required.
  1932. *@par Restrictions:
  1933. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1934. */
  1935. REG_OP(CacheAllIndexToLocal)
  1936. .INPUT(cache, TensorType({DT_RESOURCE}))
  1937. .OUTPUT(local_idx, TensorType({DT_INT64, DT_INT32, DT_UINT64, DT_UINT32}))
  1938. .REQUIRED_ATTR(dtype, Type)
  1939. .OP_END_FACTORY_REG(CacheAllIndexToLocal)
  1940. /**
  1941. *@brief LRUCacheV2, aicore LRUCache.
  1942. *@par Inputs:
  1943. *index_list: exchange index list
  1944. *data: host data
  1945. *cache: gm cache
  1946. *tag: cache's tag
  1947. *is_last_call: if is last call write all cache to data
  1948. *@par Outputs:
  1949. *data: output data
  1950. *cache: gm cache
  1951. *tag: cache's tag
  1952. *index_offset_list: index_offset_list
  1953. *not_in_cache_index_list: output not in cache's index_list
  1954. *not_in_cache_number: scalar
  1955. *@par Attributes:
  1956. *pre_route_count: a required "int". NOTE(review): semantics not documented here; confirm with the kernel implementation.
  1957. *@par Restrictions:
  1958. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1959. */
  1960. REG_OP(LRUCacheV2)
  1961. .INPUT(index_list, TensorType::BasicType())
  1962. .INPUT(data, TensorType::BasicType())
  1963. .INPUT(cache, TensorType::BasicType())
  1964. .INPUT(tag, TensorType::BasicType())
  1965. .INPUT(is_last_call, TensorType::BasicType())
  1966. .OUTPUT(data, TensorType::BasicType())
  1967. .OUTPUT(cache, TensorType::BasicType())
  1968. .OUTPUT(tag, TensorType::BasicType())
  1969. .OUTPUT(index_offset_list, TensorType::BasicType())
  1970. .OUTPUT(not_in_cache_index_list, TensorType::BasicType())
  1971. .OUTPUT(not_in_cache_number, TensorType::BasicType())
  1972. .REQUIRED_ATTR(pre_route_count, Int)
  1973. .OP_END_FACTORY_REG(LRUCacheV2)
  1974. /**
  1975. *@brief DynamicGetNext, dynamic get next data
  1976. *@par Inputs:
  1977. *x: the iterator, all types are available
  1978. *@par Outputs:
  1979. *y: the data in the iterator, all types are available
  1980. *@par Attributes:
  1981. *output_types: types of all outputs
  1982. *output_shapes: shapes of all outputs
  1983. *_dynamic_graph_execute_mode: dynamic graph execution mode,
  1984. value is one of lazy_recompile and dynamic_execute
  1985. *_getnext_inputs_shape_range: shape ranges of outputs,
  1986. it works where _dynamic_graph_execute_mode is dynamic_execute
  1987. *@par Restrictions:
  1988. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  1989. */
  1990. REG_OP(DynamicGetNext)
  1991. .INPUT(x, TensorType::ALL())
  1992. .DYNAMIC_OUTPUT(y, TensorType::ALL())
  1993. .ATTR(output_types, ListType, {})
  1994. .ATTR(output_shapes, ListListInt, {{}, {}})
  1995. .ATTR(_dynamic_graph_execute_mode, String, "lazy_recompile")
  1996. .ATTR(_getnext_inputs_shape_range, String, "")
  1997. .OP_END_FACTORY_REG(DynamicGetNext)
  1998. /**
  1999. *@brief DynamicGetNextV2, dynamic get next data. Unlike DynamicGetNext,
* this op has no input; data is taken from the channel named by channel_name.
  2002. * @par Outputs:
  2003. * y: the data in the iterator, all types are available
  2004. * @par Attributes:
  2005. * output_types: types of all outputs
  2006. * output_shapes: shapes of all outputs
* channel_name: name of operator channel, default ""
  2007. *_dynamic_graph_execute_mode: dynamic graph execution mode,
  2008. value is one of lazy_recompile and dynamic_execute
  2009. *_getnext_inputs_shape_range: shape ranges of outputs,
  2010. it works where _dynamic_graph_execute_mode is dynamic_execute
  2011. *@par Restrictions:
  2012. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  2013. */
  2014. REG_OP(DynamicGetNextV2)
  2015. .DYNAMIC_OUTPUT(y, TensorType::ALL())
  2016. .ATTR(output_types, ListType, {})
  2017. .ATTR(channel_name, String, "")
  2018. .ATTR(output_shapes, ListListInt, {{}, {}})
  2019. .ATTR(_dynamic_graph_execute_mode, String, "lazy_recompile")
  2020. .ATTR(_getnext_inputs_shape_range, String, "")
  2021. .OP_END_FACTORY_REG(DynamicGetNextV2)
  2022. /**
  2023. *@brief AdpGetNext
  2024. *@par Outputs:
  2025. *y: the data in the iterator, all types are available
  2026. *@par Attributes:
  2027. *output_types: types of all outputs
  2028. *output_shapes: shapes of all outputs
  2029. *queue_name: cdqm queue name, default ""
  2030. *@par Restrictions:
  2031. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  2032. */
  2033. REG_OP(AdpGetNext)
  2034. .DYNAMIC_OUTPUT(y, TensorType::ALL())
  2035. .ATTR(output_types, ListType, {})
  2036. .ATTR(output_shapes, ListListInt, {{}, {}})
  2037. .ATTR(queue_name, String, "")
  2038. .OP_END_FACTORY_REG(AdpGetNext)
  2039. /**
  2040. *@brief GetNextV2
  2041. *@par Outputs:
  2042. *y: the data in the iterator, all types are available
  2043. *@par Attributes:
  2044. *output_types: types of all outputs
  2045. *output_shapes: shapes of all outputs
  2046. *channel_name: name of operator channel, default ""
  2047. *@par Restrictions:
  2048. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  2049. */
  2050. REG_OP(GetNextV2)
  2051. .DYNAMIC_OUTPUT(y, TensorType::ALL())
  2052. .ATTR(output_types, ListType, {})
  2053. .ATTR(output_shapes, ListListInt, {{}, {}})
  2054. .ATTR(channel_name, String, "")
  2055. .OP_END_FACTORY_REG(GetNextV2)
  2056. /**
  2057. *@brief GetNextFromQueue
  2058. *@par Inputs:
  2059. *x: the input data; only uint8 is supported
  2060. *@par Outputs:
  2061. *y: the data in the iterator, all types are available
  2062. *@par Attributes:
  2063. *output_types: types of all outputs
  2064. *output_shapes: shapes of all outputs
  2065. *@par Restrictions:
  2066. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  2067. */
  2068. REG_OP(GetNextFromQueue)
  2069. .INPUT(x, TensorType({DT_UINT8}))
  2070. .DYNAMIC_OUTPUT(y, TensorType::ALL())
  2071. .ATTR(output_types, ListType, {})
  2072. .ATTR(output_shapes, ListListInt, {{}, {}})
  2073. .OP_END_FACTORY_REG(GetNextFromQueue)
  2074. /**
  2075. *@brief Get the batch of data in data processing . \n
  2076. *@par Attributes:
  2077. *@li output_types: A nested structure of DType objects corresponding to each
  2078. component of an element of this dataset.
  2079. *@li output_shapes: A nested structure of TensorShape objects corresponding
  2080. to each component of an element of this dataset.
  2081. *@li channel_name: A string. Default "" . \n
  2082. *@par Outputs:
  2083. *y: A nested structure of Tensor objects . \n
  2084. *@par Third-party framework compatibility
  2085. *Compatible with tensorflow GetNext operator
  2086. */
  2087. REG_OP(PeekData)
  2088. .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64,
  2089. DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
  2090. .ATTR(output_types, ListType, {})
  2091. .ATTR(output_shapes, ListListInt, {})
  2092. .ATTR(channel_name, String, "")
  2093. .OP_END_FACTORY_REG(PeekData)
  2094. /**
  2095. * @brief OptionalGetValue
  2096. * @par Inputs:
  2097. * optional: A tensor of type variant
  2098. * @par Outputs:
  2099. * components: A list of Tensor objects of output_types
  2100. * @par Attributes:
  2101. * output_types: types of all outputs. Required.
  2102. * output_shapes: shapes of all outputs. Required.
  2103. * @par Restrictions:
  2104. * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  2105. */
  2106. REG_OP(OptionalGetValue)
  2107. .INPUT(optional, TensorType({DT_VARIANT}))
  2108. .DYNAMIC_OUTPUT(components, TensorType::BasicType())
  2109. .REQUIRED_ATTR(output_types, ListType)
  2110. .REQUIRED_ATTR(output_shapes, ListListInt)
  2111. .OP_END_FACTORY_REG(OptionalGetValue)
  2112. /**
  2113. * @brief User define function process. \n
  2114. * @par Inputs:
  2115. * @li x: A list of input tensor objects. It's a dynamic input. \n
  2116. * @par Outputs:
  2117. * @li y: A list of output tensor objects. It's a dynamic output. \n
  2118. * @par Attributes:
  2119. * @li bin_path: User's binary path. Required.
  2120. * @li func_name: User defined function name. Required.
  2121. * @li output_types: Types of output data. Required.
  2122. * @li output_shapes: Shapes of output data.
  2123. * @li _flow_attr_process_node_engine_id: Default process node engine of FlowFunc.
  2124. */
  2125. REG_OP(FlowFunc)
  2126. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
  2127. DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  2128. .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
  2129. DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
  2130. .REQUIRED_ATTR(bin_path, String)
  2131. .REQUIRED_ATTR(func_name, String)
  2132. .ATTR(output_shapes, ListListInt, {})
  2133. .REQUIRED_ATTR(output_types, ListType)
  2134. .OP_END_FACTORY_REG(FlowFunc)
  2135. /**
  2136. * @brief outputs a tensor copy from the tensor at 'position' in input_sequence. \n
  2137. * @par Inputs:
  2138. * @li handle: the handle of sequence.
  2139. * @li index: position of the tensor in the sequence. negative value means
  2140. * counting position from back, accepted range in [-n, n - 1],
  2141. * where n is the number of tensors in sequence;
  2142. * it must be a scalar (tensor of empty shape). \n
  2143. * @par Outputs:
  2144. * @li y: output tensor at the specified position in the input sequence. \n
  2145. */
  2146. REG_OP(SequenceAt)
  2147. .INPUT(handle, TensorType({DT_RESOURCE}))
  2148. .INPUT(index, TensorType({DT_INT32, DT_INT64}))
  2149. .OUTPUT(y, TensorType({DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, \
  2150. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, DT_COMPLEX64, \
  2151. DT_COMPLEX128}))
  2152. .OP_END_FACTORY_REG(SequenceAt)
  2153. /**
  2154. * @brief construct a tensor sequence containing the 'inputs' tensors;
  2155. * all tensors in 'inputs' must have the same data type. \n
  2156. * @par Inputs:
  2157. * @li inputs: A list of input tensor objects. It's a dynamic input. \n
  2158. * @par Outputs:
  2159. * @li handle: Sequence enclosing the input tensors. \n
  2160. */
  2161. REG_OP(SequenceConstruct)
  2162. .DYNAMIC_INPUT(inputs, TensorType({DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, \
  2163. DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, DT_COMPLEX64, \
  2164. DT_COMPLEX128}))
  2165. .OUTPUT(handle, TensorType({DT_RESOURCE}))
  2166. .OP_END_FACTORY_REG(SequenceConstruct)
  2167. /**
  2168. * @brief construct an empty tensor sequence, with given data type. \n
  2169. * @par Outputs:
  2170. * @li handle: empty sequence. \n
  2171. * @par Attributes:
  2172. * @li dtype: the data type of the tensors in the output sequence;
  2173. * defaults to DT_FLOAT. \n
  2174. */
  2175. REG_OP(SequenceEmpty)
  2176. .OUTPUT(handle, TensorType({DT_RESOURCE}))
  2177. .ATTR(dtype, Type, DT_FLOAT)
  2178. .OP_END_FACTORY_REG(SequenceEmpty)
  2179. /**
  2180. * @brief outputs a tensor sequence that removes the tensor at 'position' from input_sequence. \n
  2181. * @par Inputs:
  2182. * @li handle: the handle of sequence.
  2183. * @li index: (optional) position of the tensor in the sequence. negative value means
  2184. * counting position from back, accepted range in [-n, n - 1],
  2185. * where n is the number of tensors in sequence;
  2186. * it must be a scalar (tensor of empty shape).
* NOTE(review): behavior when 'index' is omitted is not defined here; the ONNX
* SequenceErase default removes the last tensor — confirm against the kernel. \n
  2187. * @par Outputs:
  2188. * @li handle_y: the handle of the sequence that has the tensor
  2189. * at the specified position removed. \n
  2190. */
  2191. REG_OP(SequenceErase)
  2192. .INPUT(handle, TensorType({DT_RESOURCE}))
  2193. .OPTIONAL_INPUT(index, TensorType({DT_INT32, DT_INT64}))
  2194. .OUTPUT(handle_y, TensorType({DT_RESOURCE}))
  2195. .OP_END_FACTORY_REG(SequenceErase)
  2196. /**
  2197. * @brief outputs a tensor sequence that inserts 'value' into the sequence at 'position';
  2198. * the tensor must have the same data type as input_sequence. \n
  2199. * @par Inputs:
  2200. * @li handle: the handle of sequence.
  2201. * @li value: tensor to be inserted into the input sequence.
  2202. * @li index: (optional) position of the tensor in the sequence. negative value means
  2203. * counting position from back, accepted range in [-n, n - 1],
  2204. * where n is the number of tensors in sequence;
  2205. * it must be a scalar (tensor of empty shape).
* NOTE(review): behavior when 'index' is omitted is not defined here; the ONNX
* SequenceInsert default appends at the end — confirm against the kernel. \n
  2206. * @par Outputs:
  2207. * @li handle_y: output sequence that contains the inserted tensor
  2208. * at the given position. \n
  2209. */
  2210. REG_OP(SequenceInsert)
  2211. .INPUT(handle, TensorType({DT_RESOURCE}))
  2212. .INPUT(value, TensorType({DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, \
  2213. DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, DT_COMPLEX64, \
  2214. DT_COMPLEX128}))
  2215. .OPTIONAL_INPUT(index, TensorType({DT_INT32, DT_INT64}))
  2216. .OUTPUT(handle_y, TensorType({DT_RESOURCE}))
  2217. .OP_END_FACTORY_REG(SequenceInsert)
  2218. /**
  2219. * @brief produces a scalar (tensor of empty shape) containing the number
  2220. * of tensors in input_sequence. \n
  2221. * @par Inputs:
  2222. * @li handle: the handle of sequence. \n
  2223. * @par Outputs:
  2224. * @li length: length of input sequence; it must be a scalar (tensor of empty shape) \n
  2225. */
  2226. REG_OP(SequenceLength)
  2227. .INPUT(handle, TensorType({DT_RESOURCE}))
  2228. .OUTPUT(length, TensorType({DT_INT64}))
  2229. .OP_END_FACTORY_REG(SequenceLength)
  2230. /**
  2231. * @brief split a tensor into a sequence of tensors, along the specified axis;
  2232. * length of the parts can be specified using argument 'split'. \n
  2233. * @par Inputs:
  2234. * @li x: the tensor to split.
  2235. * @li split: length of each output, it can be either a scalar or a 1-D tensor;
  2236. * all values must be >= 0. if split is a scalar, then input will be split into
  2237. * equally sized chunks, last chunk will be smaller if input size along the given
  2238. * axis is not divisible by split, otherwise the tensor is split into size(split)
  2239. * chunks, with lengths of the parts on axis specified in split. in this scenario,
  2240. * the sum of entries in split must be equal to the dimension size of the input tensor
  2241. * on axis \n
  2242. * @par Outputs:
  2243. * @li handle: one or more outputs forming a sequence of tensors after splitting. \n
  2244. * @par Attributes:
  2245. * @li axis: which axis to split on, a negative value means counting dimensions from
  2246. * the back, accepted range is [-rank, rank - 1].
  2247. * @li keepdims: keep the split dimension or not, default true, which means the split
  2248. * dimension is kept. if input 'split' is specified, this attribute is ignored.
  2249. */
  2250. REG_OP(SplitToSequence)
  2251. .INPUT(x, TensorType({DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, \
  2252. DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, DT_COMPLEX64, \
  2253. DT_COMPLEX128}))
  2254. .OPTIONAL_INPUT(split, TensorType({DT_INT32, DT_INT64}))
  2255. .OUTPUT(handle, TensorType({DT_RESOURCE}))
  2256. .ATTR(axis, Int, 0)
  2257. .ATTR(keepdims, Bool, true)
  2258. .OP_END_FACTORY_REG(SplitToSequence)
  2259. /**
  2260. * @brief concatenate a sequence of tensors into a single tensor; all input tensors must
  2261. * have the same shape, except for the dimension size of the axis to concatenate on. by default
  2262. * new_axis is 0, the behavior is similar to numpy.concatenate. when new_axis is 1, the behavior
  2263. * is similar to numpy.stack. \n
  2264. * @par Inputs:
  2265. * @li handle: sequence of tensors for concatenation. \n
  2266. * @par Outputs:
  2267. * @li y: concatenated tensor. \n
  2268. * @par Attributes:
  2269. * @li axis: which axis to concat on, accepted range in [-r, r - 1], where r is the rank of input
  2270. * tensor, when new_axis is 1, accepted range is [-r - 1, r]. Required.
  2271. * @li new_axis: insert and concatenate on a new axis or not. default 0 means do not insert a new axis.
  2272. */
  2273. REG_OP(ConcatFromSequence)
  2274. .INPUT(handle, TensorType({DT_RESOURCE}))
  2275. .OUTPUT(y, TensorType({DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, \
  2276. DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL, DT_COMPLEX64, \
  2277. DT_COMPLEX128}))
  2278. .REQUIRED_ATTR(axis, Int)
  2279. .ATTR(new_axis, Int, 0)
  2280. .OP_END_FACTORY_REG(ConcatFromSequence)
  2281. } // namespace ge
  2282. #endif // OPS_BUILT_IN_OP_PROTO_INC_DATA_FLOW_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示