You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

selection_ops.h 93 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file selection_ops.h
 * \brief
 */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
  24. /**
  25. *@brief Creates a sequence of numbers . \n
  26. *@par Inputs:
  27. *Three inputs, including:
  28. * @li start: A 0D Tensor (scalar). Acts as first entry in the range if "limit"
  29. * is not "None"; otherwise, acts as range limit and first entry defaults to "0".
  30. * The supported types are: float32, int32, double, int64.
  31. * @li limit: A 0D Tensor (scalar). Upper limit of sequence, exclusive. If "None",
  32. * defaults to the value of "start" while the first entry of the range
  33. * defaults to "0". The supported types are: float32, int32, double, int64.
  34. * @li delta: A 0D Tensor (scalar). Number that increments "start".
  35. * Defaults to "1". The supported types are: float32, int32, double, int64 . \n
  36. *@par Outputs:
  37. *y: A 1D Tensor . \n
  38. *@par Third-party framework compatibility
  39. *Compatible with the TensorFlow operator Range.
  40. */
  41. REG_OP(Range)
  42. .INPUT(start, TensorType({DT_FLOAT,DT_INT32,DT_DOUBLE,DT_INT64}))
  43. .INPUT(limit, TensorType({DT_FLOAT,DT_INT32,DT_DOUBLE,DT_INT64}))
  44. .INPUT(delta, TensorType({DT_FLOAT,DT_INT32,DT_DOUBLE,DT_INT64}))
  45. .OUTPUT(y, TensorType({DT_FLOAT,DT_INT32,DT_DOUBLE,DT_INT64}))
  46. .OP_END_FACTORY_REG(Range)
  47. /**
  48. *@brief: Creates a sequence of numbers . \n
  49. *@par Inputs:
  50. *Four inputs, including:
  51. * @li x: A 1D Tensor of type float32 or int32. The assistant data.
  52. * @li start: A 0D Tensor (scalar) of type float32 or int32. Acts as first entry in the range if "limit"
  53. * is not "None"; otherwise, acts as range limit and first entry defaults to "0".
  54. * @li limit: A 0D Tensor (scalar) of type float32 or int32.
  55. * Upper limit of sequence, exclusive. If "None",
  56. * defaults to the value of "start" while the first entry of the range
  57. * defaults to "0".
  58. * @li delta: A 0D Tensor (scalar) of type float32 or int32.
  59. * Number that increments "start". Defaults to "1" . \n
  60. *@par Outputs:
  61. *y: A 1D Tensor . \n
  62. *@par Quantization supported or not
  63. *Not supported
  64. *@par Quantized inference supported or not
  65. *Not supported
  66. *@par Multiple batches supported or not
  67. *Supported
  68. *@see Range()
  69. *@since V100R001C33
  70. *
  71. * @par Restrictions:
  72. * Warning: THIS FUNCTION IS DEPRECATED. Please use Range instead.
  73. */
  74. REG_OP(RangeD)
  75. .INPUT(x, TensorType({DT_FLOAT,DT_INT32}))
  76. .OUTPUT(y, TensorType({DT_FLOAT,DT_INT32}))
  77. .REQUIRED_ATTR(start, Float)
  78. .REQUIRED_ATTR(limit, Float)
  79. .REQUIRED_ATTR(delta, Float)
  80. .OP_END_FACTORY_REG(RangeD)
  81. /**
  82. *@brief Constructs a tensor by tiling a given tensor . \n
  83. *@par Inputs:
  84. *Two inputs, including:
  85. * @li x: A Tensor.
  86. * Must be one of the following types: float16, float32, double, int64, int32, uint8, uint16,
  87. uint32, uint64, int8, int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
  88. * @li multiples: A 1D Tensor of type int32 or int64.
  89. * The length must be the same as the number of dimensions in "input"
  90. *@par Outputs:
  91. *y: A Tensor. Has the same type as "x" . \n
  92. *@see TileD()
  93. *@par Third-party framework compatibility
  94. *Compatible with the TensorFlow operator Tile.
  95. */
  96. REG_OP(Tile)
  97. .INPUT(x, TensorType::BasicType())
  98. .INPUT(multiples, TensorType::IndexNumberType())
  99. .OUTPUT(y, TensorType::BasicType())
  100. .OP_END_FACTORY_REG(Tile)
  101. /**
  102. *@brief Constructs a tensor by tiling a given tensor . \n
  103. *@par Inputs:
  104. *x: A Tensor. Must be one of the following types: float32, float16, int32 . \n
  105. *@par Attributes:
  106. *multiples: A required Tensor of type int32 or int64.
  107. * Number of replication times . \n
  108. *@par Outputs:
  109. *y: A Tensor. Has the same type as "x" . \n
  110. *@see Tile()
  111. *@par Third-party framework compatibility
  112. *Compatible with the TensorFlow operator Tile.
  113. *@par Restrictions:
  114. *Warning: THIS FUNCTION IS DEPRECATED. Please use Tile instead.
  115. */
  116. REG_OP(TileD)
  117. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  118. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  119. .REQUIRED_ATTR(multiples, ListInt)
  120. .OP_END_FACTORY_REG(TileD)
  121. /**
  122. * @brief Gather slices from "x" into a tensor with shape specified by
  123. * "indices". "indices" is an K-dimensional integer tensor, best thought of as a
  124. * (K-1)-dimensional tensor of "indices" into "params", where each element
  125. * defines a slice of "params":
  126. * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]]
  127. * "indices" defines slices into the first N dimensions of
  128. * "params", where
  129. * N = indices.shape[-1]
  130. * indices = [[0, 0], [1, 1]]
  131. * x = [['a', 'b'], ['c', 'd']]
  132. * output = ['a', 'd']
  133. * @par Inputs:
  134. * @li x: A Tensor of type BasicType.
  135. * @li indices: A Tensor of type IndexNumberType . \n
  136. * @par Outputs:
  137. * y: A Tensor of type BasicType.
  138. * @see GatherNd()
  139. * @attention Constraints:
  140. * @li "x" is one of the following types: float16, float32, double, int32,
  141. * uint8, int16, int8, complex64, int64, qint8, quint8, qint32, uint16,
  142. * complex128, uint32, uint64 . \n
  143. * @par Third-party framework compatibility
  144. * Compatible with the TensorFlow operator GatherNd.
  145. */
  146. REG_OP(GatherNd)
  147. .INPUT(x, TensorType::BasicType())
  148. .INPUT(indices, TensorType::IndexNumberType())
  149. .OUTPUT(y, TensorType::BasicType())
  150. .OP_END_FACTORY_REG(GatherNd)
  151. /**
  152. *@brief Gather slices from "x" according to "indices" by corresponding axis .
  153. *@par Inputs:
  154. *Three inputs, including:
  155. * @li x: A Tensor. Must be one of the following types: float32, float64, int32,
  156. * uint8, int16, int8, int64, qint8, quint8, qint32, qint16, quint16,
  157. * uint16, complex128, float16, uint32, uint64, complex64, complex128.
  158. * @li indices: A Tensor of type int32 or int64.
  159. * @li axis: A Tensor of type as int32 or int64,
  160. * Must be in the range [-rank(input_tensor), rank(input_tensor)) .
  161. *@par Attributes:
  162. * batch_dims: An optional int. Defaults to 0.
  163. *@par Outputs:
  164. *y: A Tensor. Has the same type as "x" .
  165. *@attention Constraints:
  166. *Value in indices must be in range [0, x.shape[axis])
  167. *@par Third-party framework compatibility
  168. * Compatible with the TensorFlow operator GatherV2 .
  169. */
  170. REG_OP(GatherV2)
  171. .INPUT(x, TensorType::BasicType())
  172. .INPUT(indices, TensorType::IndexNumberType())
  173. .INPUT(axis, TensorType::IndexNumberType())
  174. .OUTPUT(y, TensorType::BasicType())
  175. .ATTR(batch_dims, Int, 0)
  176. .OP_END_FACTORY_REG(GatherV2)
  177. /**
  178. *@brief Gather slices from "x" according to "indices" by corresponding axis . \n
  179. *@par Inputs:
  180. *Two inputs, including:
  181. * @li x: A Tensor. Must be one of the following types: float32, float16, int32, uint32, int8, uint8,
  182. * int16, uint16, int64, uint64.
  183. * @li indices: A Tensor of type int32 or int64 . \n
  184. *@par Attributes:
  185. *axis: A int32 specifying the axis to gather from . \n
  186. *@par Outputs:
  187. *y: A Tensor. Has the same type as "x" . \n
  188. *@attention Constraints:
  189. *@par Third-party framework compatibility
  190. * Compatible with the TensorFlow operator GatherV2.
  191. *
  192. * @par Restrictions:
  193. * Warning: THIS FUNCTION IS DEPRECATED. Please use GatherV2 instead.
  194. */
  195. REG_OP(GatherV2D)
  196. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT32, DT_INT8, DT_UINT8,
  197. DT_INT16, DT_UINT16, DT_INT64, DT_UINT64}))
  198. .INPUT(indices, TensorType::IndexNumberType())
  199. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT32, DT_INT8, DT_UINT8,
  200. DT_INT16, DT_UINT16, DT_INT64, DT_UINT64}))
  201. .REQUIRED_ATTR(axis, Int)
  202. .OP_END_FACTORY_REG(GatherV2D)
  203. /**
  204. *@Gathers values along an axis specified by dim . \n
  205. *@par Inputs:
  206. *@li x: A Tensor. Must be one of the following types: float16, float32, int32, int64.
  207. *@li index: A Tensor. Must be one of the following types: int64 . \n
  208. *@par Attributes:
  209. * dim: the axis along which to index . \n
  210. *@par Outputs:
  211. * y: A Tensor. Has the same type as "x" . \n
  212. *@par Third-party framework compatibility
  213. *Compatible with the PyTorch operator Gather.
  214. */
  215. REG_OP(GatherElements)
  216. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
  217. .INPUT(index, TensorType({DT_INT32, DT_INT64}))
  218. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT64}))
  219. .ATTR(dim, Int, 0)
  220. .OP_END_FACTORY_REG(GatherElements)
  221. /**
  222. *@brief Extracts a strided slice of a tensor. Roughly speaking, this op
  223. extracts a slice of size (end-begin)/stride from the given input tensor.
  224. Starting at the location specified by begin the slice continues by
  225. adding stride to the index until all dimensions are not less than end.
  226. *@par Inputs:
  227. *Four inputs, including:
  228. * @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
  229. * complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
  230. * complex128, float16, uint32, uint64, complex64, complex128.
  231. * @li begin: A Tensor of type int32 or int64, for the index of the first value to select . \n
  232. * @li end: A Tensor of type int32 or int64, for the index of the last value to select . \n
  233. * @li strides: A Tensor of type int32 or int64, for the increment . \n
  234. *@par Attributes:
  235. * @li begin_mask: A Tensor of type int32.
  236. A bitmask where a bit "i" being "1" means to ignore the begin
  237. value and instead use the largest interval possible.
  238. * @li end_mask: A Tensor of type int32.
  239. Analogous to "begin_mask".
  240. * @li ellipsis_mask: A Tensor of type int32.
  241. A bitmask where bit "i" being "1" means the "i"th position
  242. is actually an ellipsis.
  243. * @li new_axis_mask: A Tensor of type int32.
  244. A bitmask where bit "i" being "1" means the "i"th
  245. specification creates a new shape 1 dimension.
  246. * @li shrink_axis_mask: A Tensor of type int32.
  247. A bitmask where bit "i" implies that the "i"th
  248. specification should shrink the dimensionality . \n
  249. *@par Outputs:
  250. *y: A Tensor. Has the same type as "x" . \n
  251. *@par Third-party framework compatibility
  252. * Compatible with the TensorFlow operator StridedSlice.
  253. */
  254. REG_OP(StridedSlice)
  255. .INPUT(x, TensorType::BasicType())
  256. .INPUT(begin, TensorType::IndexNumberType())
  257. .INPUT(end, TensorType::IndexNumberType())
  258. .INPUT(strides, TensorType::IndexNumberType())
  259. .ATTR(begin_mask, Int, 0)
  260. .ATTR(end_mask, Int, 0)
  261. .ATTR(ellipsis_mask, Int, 0)
  262. .ATTR(new_axis_mask, Int, 0)
  263. .ATTR(shrink_axis_mask, Int, 0)
  264. .OUTPUT(y, TensorType::BasicType())
  265. .OP_END_FACTORY_REG(StridedSlice)
  266. /**
  267. *@brief Extracts a strided slice of a tensor. Roughly speaking, this op
  268. extracts a slice of size "(end-begin)/stride" from the given input tensor.
  269. Starting at the location specified by "begin" the slice continues by
  270. adding "stride" to the index until all dimensions are not less than "end" . \n
  271. *@par Inputs:
  272. *x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
  273. * complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
  274. * complex128, float16, uint32, uint64, complex64, complex128 . \n
  275. *@par Attributes:
  276. * @li begin: A Tensor of type int32 or int64.
  277. The index of the first value to select.
  278. * @li end: A Tensor of type int32 or int64.
  279. The index of the last value to select.
  280. * @li strides: A Tensor of type int32 or int64, for the increment.
  281. * @li begin_mask: A Tensor of type int32.
  282. A bitmask where a bit "i" being "1" means to ignore the begin
  283. value and instead use the largest interval possible.
  284. * @li end_mask: Analogous to "begin_mask". A Tensor of type as int32.
  285. * @li ellipsis_mask: A Tensor of type int32.
  286. A bitmask where bit "i" being "1" means the "i"th position
  287. is actually an ellipsis.
  288. * @li new_axis_mask: A Tensor of type int32.
  289. A bitmask where bit "i" being "1" means the "i"th
  290. specification creates a new shape 1 dimension.
  291. * @li shrink_axis_mask: A Tensor of type int32.
  292. A bitmask where bit "i" implies that the "i"th
  293. specification should shrink the dimensionality . \n
  294. *@par Outputs:
  295. *y: A Tensor. Has the same type as "x" . \n
  296. *@par Third-party framework compatibility
  297. * Compatible with the TensorFlow operator StridedSlice.
  298. * @par Restrictions:
  299. * Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSlice instead.
  300. */
  301. REG_OP(StridedSliceD)
  302. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT8, DT_INT8,
  303. DT_BOOL}))
  304. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_UINT8, DT_INT8,
  305. DT_BOOL}))
  306. .REQUIRED_ATTR(begin, ListInt)
  307. .REQUIRED_ATTR(end, ListInt)
  308. .REQUIRED_ATTR(strides, ListInt)
  309. .ATTR(begin_mask, Int, 0)
  310. .ATTR(end_mask, Int, 0)
  311. .ATTR(ellipsis_mask, Int, 0)
  312. .ATTR(new_axis_mask, Int, 0)
  313. .ATTR(shrink_axis_mask, Int, 0)
  314. .OP_END_FACTORY_REG(StridedSliceD)
  315. /**
  316. *@brief Since StridedSlice cuts out pieces of its "input" which is size "dy",
  317. its gradient will have the same shape (which is passed here as "shape").
  318. The gradient will be zero in any element that the slice does not select . \n
  319. *@par Inputs:
  320. *dy: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
  321. * complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
  322. * complex128, float16, uint32, uint64, complex64, complex128 . \n
  323. *@par Attributes:
  324. * @li shape: A Tensor of type int32 or int64.
  325. * @li begin: A Tensor of type int32 or int64.
  326. The index of the first value to select.
  327. * @li end: A Tensor of type int32 or int64.
  328. The index of the last value to select.
  329. * @li strides: A Tensor of type int32 or int64, for the increment.
  330. * @li begin_mask: A Tensor of type int32.
  331. A bitmask where a bit "i" being "1" means to ignore the begin
  332. value and instead use the largest interval possible.
  333. * @li end_mask: A Tensor of type int32.
  334. Analogous to "begin_mask".
  335. * @li ellipsis_mask: A Tensor of type int32.
  336. A bitmask where bit "i" being "1" means the "i"th position
  337. is actually an ellipsis.
  338. * @li new_axis_mask: A Tensor of type int32.
  339. A bitmask where bit "i" being "1" means the "i"th
  340. specification creates a new shape 1 dimension.
  341. * @li shrink_axis_mask: A Tensor of type int32.
  342. A bitmask where bit "i" implies that the "i"th
  343. specification should shrink the dimensionality . \n
  344. *@par Outputs:
  345. *output: A Tensor. Has the same type as "dy" . \n
  346. *@par Third-party framework compatibility
  347. * Compatible with the TensorFlow operator StridedSliceGradD.
  348. * @par Restrictions:
  349. * Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSliceGrad instead.
  350. */
  351. REG_OP(StridedSliceGradD)
  352. .INPUT(dy, TensorType::BasicType())
  353. .OUTPUT(output, TensorType::BasicType())
  354. .REQUIRED_ATTR(shape, ListInt)
  355. .REQUIRED_ATTR(begin, ListInt)
  356. .REQUIRED_ATTR(end, ListInt)
  357. .REQUIRED_ATTR(strides, ListInt)
  358. .ATTR(begin_mask, Int, 0)
  359. .ATTR(end_mask, Int, 0)
  360. .ATTR(ellipsis_mask, Int, 0)
  361. .ATTR(new_axis_mask, Int, 0)
  362. .ATTR(shrink_axis_mask, Int, 0)
  363. .OP_END_FACTORY_REG(StridedSliceGradD)
/**
*@brief Since StridedSlice cuts out pieces of its "input" which is size "dy",
* its gradient will have the same shape (which is passed here as "shape").
* The gradient will be zero in any element that the slice does not select. \n
*@par Inputs:
*Five inputs, including:
* @li shape: A Tensor of type int32 or int64. The shape of the gradient output.
* @li begin: A Tensor of type int32 or int64.
* The index of the first value to select.
* @li end: A Tensor of type int32 or int64.
* The index of the last value to select.
* @li strides: A Tensor of type int32 or int64, for the increment.
* @li dy: A Tensor. Must be one of the following types:
* float32, float64, int32, uint8, int16, int8, complex64, int64,
* qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, float16, uint32, uint64. \n
*@par Attributes:
* @li begin_mask: An optional int. Defaults to "0".
* A bitmask where a bit "i" being "1" means to ignore the begin
* value and instead use the largest interval possible.
* @li end_mask: An optional int. Defaults to "0".
* Analogous to "begin_mask".
* @li ellipsis_mask: An optional int. Defaults to "0".
* A bitmask where bit "i" being "1" means the "i"th position
* is actually an ellipsis.
* @li new_axis_mask: An optional int. Defaults to "0".
* A bitmask where bit "i" being "1" means the "i"th
* specification creates a new shape 1 dimension.
* @li shrink_axis_mask: An optional int. Defaults to "0".
* A bitmask where bit "i" implies that the "i"th
* specification should shrink the dimensionality. \n
*@par Outputs:
*output: A Tensor. Has the same type as "dy". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator StridedSliceGrad.
*/
REG_OP(StridedSliceGrad)
    .INPUT(shape, TensorType::IndexNumberType())
    .INPUT(begin, TensorType::IndexNumberType())
    .INPUT(end, TensorType::IndexNumberType())
    .INPUT(strides, TensorType::IndexNumberType())
    .INPUT(dy, TensorType::BasicType())
    .OUTPUT(output, TensorType::BasicType())
    .ATTR(begin_mask, Int, 0)
    .ATTR(end_mask, Int, 0)
    .ATTR(ellipsis_mask, Int, 0)
    .ATTR(new_axis_mask, Int, 0)
    .ATTR(shrink_axis_mask, Int, 0)
    .OP_END_FACTORY_REG(StridedSliceGrad)
/**
*@brief Computes the sum along segments of a tensor. \n
*@par Inputs:
*Three inputs, including:
* @li x: A Tensor of type NumberType.
* @li segment_ids: A Tensor of type IndexNumberType, whose shape is a prefix
* of "x.shape".
* @li num_segments: A Tensor of type IndexNumberType. \n
*@par Outputs:
*y: A Tensor of type NumberType. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator UnsortedSegmentSum.
*/
REG_OP(UnsortedSegmentSum)
    .INPUT(x, TensorType::NumberType())
    .INPUT(segment_ids, TensorType::IndexNumberType())
    .INPUT(num_segments, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .OP_END_FACTORY_REG(UnsortedSegmentSum)
/**
*@brief Creates a one-dimensional tensor of size "steps" whose values are
* evenly spaced from "start" to "end", inclusive, on a logarithmic scale with
* base "base". \n
*@par Inputs:
*One input, including:
* assist: A tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li start: A required float. The exponent of the first value.
* @li end: A required float. The exponent of the last value.
* @li steps: An optional int. Defaults to 100.
* @li base: An optional float. Defaults to 10.0.
* @li dtype: An optional int. Defaults to 1. Selects the output data type. \n
*@par Outputs:
*y: A Tensor with the same type and shape as input "assist". \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator logspace. \n
*/
REG_OP(LogSpaceD)
    .INPUT(assist, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR (start, Float)
    .REQUIRED_ATTR (end, Float)
    .ATTR(steps, Int, 100)
    .ATTR(base, Float, 10.0)
    .ATTR(dtype, Int, 1)
    .OP_END_FACTORY_REG(LogSpaceD)
/**
*@brief Computes the sum along segments of a tensor. \n
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor of type float16, float32, int32, int8, uint8.
* @li segment_ids: A Tensor of type int32, whose shape is a prefix
* of "x.shape". \n
*@par Attributes:
*num_segments: A required int32, specifying the number of distinct segment IDs. \n
*@par Outputs:
*y: A Tensor with same type as "x". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator UnsortedSegmentSum.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentSum instead.
*/
REG_OP(UnsortedSegmentSumD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_UINT8}))
    .INPUT(segment_ids, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(num_segments, Int)
    .OP_END_FACTORY_REG(UnsortedSegmentSumD)
/**
*@brief Reverses specific dimensions of a tensor. \n
*@par Inputs:
* Two inputs, including:
*@li x: An ND Tensor (up to 8D).
*Must be one of the following types: int8, uint8, int16, uint16, int32, int64,
* bool, float16, float32, double, complex64, complex128, string.
*@li axis: A 1D Tensor.
*Must be one of the following types: int32, int64.
*@par Outputs:
*y: A Tensor. Has the same type and format as "x".
*@attention Constraints:
*"axis" must be within the rank of "x". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReverseV2.
*/
REG_OP(ReverseV2)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                          DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                          DT_COMPLEX64, DT_COMPLEX128, DT_STRING}))
    .INPUT(axis, TensorType({DT_INT32,DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                           DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                           DT_COMPLEX64, DT_COMPLEX128, DT_STRING}))
    .OP_END_FACTORY_REG(ReverseV2)
/**
*@brief Reverses specific dimensions of a tensor. \n
*@par Inputs:
* One input:
*x: An ND Tensor (up to 8D).
* Must be one of the following types: int8, uint8, int16, uint16, int32,
* int64, bool, float16, float, double, complex64, complex128, string. \n
*@par Attributes:
*axis: A required list of ints. The indices of the dimensions to reverse. \n
*@par Outputs:
*y: A Tensor. Has the same type and format as "x".
*@attention Constraints:
*"axis" must be within the rank of "x". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReverseV2.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use ReverseV2 instead.
*/
REG_OP(ReverseV2D)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                          DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                          DT_COMPLEX64, DT_COMPLEX128, DT_STRING}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                           DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                           DT_COMPLEX64, DT_COMPLEX128, DT_STRING}))
    .REQUIRED_ATTR(axis, ListInt)
    .OP_END_FACTORY_REG(ReverseV2D)
/**
*@brief Selects elements from "x1" or "x2", depending on "condition". \n
*@par Inputs:
* Three inputs, including:
* @li condition: A Tensor of type bool.
* @li x1: A Tensor. Must be one of the following types: float16, float32,
* int32, int8, uint8, int16, uint16, double, complex64, int64, complex128,
* qint8, quint8, qint16, quint16, qint32, quint32, uint32, uint64.
* format:ND
* @li x2: A Tensor of the same type as "x1". format:ND
*@par Outputs:
*y: A Tensor. Has the same type as "x1". format:ND
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Select.
*/
REG_OP(Select)
    .INPUT(condition, TensorType({DT_BOOL}))
    .INPUT(x1,TensorType::BasicType())
    .INPUT(x2,TensorType::BasicType())
    .OUTPUT(y,TensorType::BasicType())
    .OP_END_FACTORY_REG(Select)
/**
*@brief Selects elements from "then" or "else", depending on "condition",
* with broadcasting support. \n
*@par Inputs:
* Three inputs, including:
* @li condition: A Tensor of type bool.
* @li then: A Tensor. Must be one of the following types: float16, float32, int32, int8, uint8.
* @li else: A Tensor of the same type as "then". \n
*@par Outputs:
*result: A Tensor. Has the same type as "then". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SelectV2.
*/
REG_OP(SelectV2)
    .INPUT(condition, TensorType({DT_BOOL}))
    .INPUT(then,TensorType::BasicType())
    .INPUT(else,TensorType::BasicType())
    .OUTPUT(result,TensorType::BasicType())
    .OP_END_FACTORY_REG(SelectV2)
/**
*@brief Computes the maximum along segments of a tensor.
*Computes a tensor such that output[i] = max(data[j]) over all j such that
* segment_ids[j] == i.
*If the max is empty for a given segment ID i, output[i] = 0.
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor of type float16, float32, int32, int8, uint8.
* @li segment_ids: A Tensor whose size equals the first dimension of "x".
* Values must be sorted, non-negative integers; they need not cover the full
* range of valid values.
*@par Outputs:
*y: A Tensor with same type as "x". \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SegmentMax.
*/
REG_OP(SegmentMax)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(segment_ids, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(SegmentMax)
/**
*@brief Computes the maximum along segments of a tensor.
*Computes a tensor such that output[i] = max(data[j]) over all j
* such that segment_ids[j] == i.
*If the max is empty for a given segment ID i, output[i] = 0.
*@par Inputs:
*One input, including:
* x: A Tensor of type float16, float, int32. format:ND
*@par Attributes:
* segment_ids: A required list of ints whose length equals the size of the
* first dimension of "x". Values must be sorted, non-negative integers; they
* need not cover the full range of valid values.
*@par Outputs:
*y: A Tensor with same type as "x". format:ND
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SegmentMax.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use SegmentMax instead.
*/
REG_OP(SegmentMaxD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .REQUIRED_ATTR(segment_ids, ListInt)
    .OP_END_FACTORY_REG(SegmentMaxD)
/**
*@brief Returns a one-hot tensor. The locations represented by index in "x"
* take value "on_value", while all other locations take value "off_value". \n
*@par Inputs:
*Four inputs, including:
* @li x: A Tensor of indices. Must be one of the following types: int32, uint8, int64.
* @li depth: A scalar of type int32. The depth of the one-hot dimension.
* @li on_value: A scalar. The value to fill in output when indices[j] = i.
* Must be one of the following types: float16, float32, int32, int8, uint8.
* @li off_value: A scalar. The value to fill in output when indices[j] != i.
* Has the same type as "on_value". \n
*@par Attributes:
*axis: An optional int. The axis to fill. Defaults to "-1". \n
*@par Outputs:
*y: A Tensor. Has the same type as "on_value". \n
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator OneHot.
*/
REG_OP(OneHot)
    .INPUT(x, TensorType({DT_UINT8, DT_INT32, DT_INT64}))
    .INPUT(depth, TensorType({DT_INT32}))
    .INPUT(on_value, TensorType::BasicType())
    .INPUT(off_value, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(axis, Int, -1)
    .OP_END_FACTORY_REG(OneHot)
/**
*@brief Returns a one-hot tensor. The locations represented by index in "x"
* take value "on_value", while all other locations take value "off_value". \n
*@par Inputs:
*Three inputs, including:
*@li x: A Tensor of indices. Must be one of the following types: int32, uint8.
*@li on_value: A scalar. The value to fill in output when indices[j] = i.
* Must be one of the following types: float16, float32, int32, int8, uint8.
*@li off_value: A scalar. The value to fill in output when indices[j] != i.
* Has the same type as "on_value". \n
*@par Attributes:
*@li depth: A required scalar of type int32. The depth of the one-hot dimension.
*@li axis: An optional int. The axis to fill. Defaults to "-1". \n
*@par Outputs:
*y: A Tensor. Has the same type as "on_value". \n
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator OneHot.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use OneHot instead.
*/
REG_OP(OneHotD)
    .INPUT(x, TensorType({DT_UINT8, DT_INT32}))
    .INPUT(on_value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT8,
                                 DT_INT8}))
    .INPUT(off_value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT8,
                                  DT_INT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_UINT8, DT_INT8}))
    .REQUIRED_ATTR(depth, Int)
    .ATTR(axis, Int, -1)
    .OP_END_FACTORY_REG(OneHotD)
/**
*@brief Extracts a slice from a tensor.
* This operation extracts a slice of size "size" from a tensor "x"
* starting at the location specified by "offsets". \n
*@par Inputs:
*@li x: A Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@li offsets: A Tensor of type int32 or int64. The starting location for the slice.
*@li size: A Tensor of type int32 or int64. The shape of the slice. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". The slice extracted from the tensor. \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Slice.
*/
REG_OP(Slice)
    .INPUT(x, TensorType::BasicType())
    .INPUT(offsets, TensorType::IndexNumberType())
    .INPUT(size, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Slice)
/**
*@brief Extracts a slice from a tensor.
* This operation extracts a slice of size "size" from a tensor "x"
* starting at the location specified by "offsets". \n
*@par Inputs:
*x: A Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32. \n
*@par Attributes:
*@li offsets: A required list of ints. The starting location for the slice.
*@li size: A required list of ints. The shape of the slice. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". The slice extracted from the tensor.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Slice instead.
*/
REG_OP(SliceD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(offsets, ListInt)
    .REQUIRED_ATTR(size, ListInt)
    .OP_END_FACTORY_REG(SliceD)
/**
*@brief Extracts a slice from a tensor.
* This operation extracts a slice of size "size" from a tensor "x"
* starting at the location specified by "offsets". \n
*@par Inputs:
*@li x: A Tensor. Must be one of the following types:
* float16, float32, double, int64, int32, uint8, uint16, uint32, uint64, int8,
* int16, complex64, complex128, qint8, quint8, qint16, quint16, qint32.
*@li offsets: A Tensor of type int32 or int64. The starting location for the slice. \n
*@par Attributes:
*size: A required list of ints. The shape of the slice. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x". The slice extracted from the tensor.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Slice instead.
*/
REG_OP(SliceDV2)
    .INPUT(x, TensorType::BasicType())
    .INPUT(offsets, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(size, ListInt)
    .OP_END_FACTORY_REG(SliceDV2)
/**
* @brief Finds values and indices of the "k" largest elements for the last
* dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: A 1D or higher tensor of type float16, with the last dimension at
* least "k".
* Specifies the data to sort.
* @li assist_seq: A 1D tensor of type float16,
* with size 2N, where "N" is the size of the last dimension.
* The first N numbers are indices, and the next N numbers are the deviation of
* casting int32 to float16. \n
* @par Attributes:
* @li k: A required int that is at least 0, specifying the number of top
* elements to look for along the last dimension (along each row for matrices).
* @li sorted: An optional bool. Defaults to true.
* If true, the resulting "k" elements will be sorted by the values in
* descending order.
* @li dim: An optional int. Defaults to -1. For reserved use.
* @li largest: An optional bool. Defaults to true. For reserved use. \n
* @par Outputs:
* @li values: A Tensor, specifying the sorted data. Has the same type as "x".
* @li indices: A Tensor of type int32, specifying the indices of sorted data. \n
* @attention Constraints:
* @li k <= 5120
* @li Size of the last dimension <= 1458176
* @li sorted = true
* @li Sorted indices are unstable on the Ascend310 platform.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use TopKV2 instead.
*/
REG_OP(TopKD)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(assist_seq, TensorType({DT_FLOAT16}))
    .OUTPUT(values, TensorType::RealNumberType())
    .OUTPUT(indices, TensorType({DT_INT32}))
    .REQUIRED_ATTR(k, Int)
    .ATTR(sorted, Bool, true)
    .ATTR(dim, Int, -1)
    .ATTR(largest, Bool, true)
    .OP_END_FACTORY_REG(TopKD)
/**
* @brief Finds values and indices of the "k" largest elements for the last
* dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: A 1D or higher tensor of type BasicType, with the last dimension
* at least "k".
* @li k: A 0D Tensor of type int32.
* Number of top elements to look for along the last dimension (along each row
* for matrices). \n
* @par Attributes:
* @li sorted: An optional bool. Defaults to true.
* If true, the resulting "k" elements will be sorted by the values in
* descending order.
* @li dim: An optional int. Defaults to -1. For reserved use.
* @li largest: An optional bool. Defaults to true. For reserved use. \n
* @par Outputs:
* @li values: A Tensor, specifying the sorted data. Has the same type as "x".
* @li indices: A Tensor of type int32, specifying the indices of sorted data. \n
* @see TopK()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator TopKV2.
*/
REG_OP(TopKV2)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(k, TensorType({DT_INT32}))
    .OUTPUT(values, TensorType::RealNumberType())
    .OUTPUT(indices, TensorType({DT_INT32}))
    .ATTR(sorted, Bool, true)
    .ATTR(dim, Int, -1)
    .ATTR(largest, Bool, true)
    .OP_END_FACTORY_REG(TopKV2)
/**
* @brief Finds values and indices of the "k" largest elements for the last
* dimension. \n
* @par Inputs:
* Two inputs, including:
* @li x: A 1D or higher tensor of type BasicType, with the last dimension
* at least "k".
* @li k: A 0D Tensor of type int32.
* Number of top elements to look for along the last dimension (along each row
* for matrices). \n
* @par Attributes:
* @li sorted: An optional bool. Defaults to true.
* If true, the resulting "k" elements will be sorted by the values in
* descending order.
* @li largest: An optional bool. Defaults to true. If true, the resulting "k"
* elements are the largest; otherwise the smallest.
* @li dim: An optional int. Defaults to -1. The dimension along which to look
* for the top "k" elements. \n
* @par Outputs:
* @li values: A Tensor, specifying the sorted data. Has the same type as "x".
* @li indices: A Tensor of type int32, specifying the indices of sorted data. \n
* @see TopK()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator TopKV2.
*/
REG_OP(TopK)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(k, TensorType({DT_INT32}))
    .OUTPUT(values, TensorType::RealNumberType())
    .OUTPUT(indices, TensorType({DT_INT32}))
    .ATTR(sorted, Bool, true)
    .ATTR(largest, Bool, true)
    .ATTR(dim, Int, -1)
    .OP_END_FACTORY_REG(TopK)
/**
*@brief Creates a new tensor by applying sparse updates "x" to individual
* values or slices within a tensor (initially zero for numeric, empty for
* string) of the given "shape" according to "indices". \n
*@par Inputs:
*Three inputs, including:
* @li indices: A required index tensor. Must be one of the following types: int32 or int64.
* @li x: A required slice tensor. Must be one of the following types: float32, float16, int32, int8, uint8, etc.
* @li shape: A required tensor of type int32 or int64, specifying the output shape.
*@par Outputs:
*y: An output Tensor with the same datatype as "x". \n
*@attention Constraints:
*@li "y" has the same shape as "shape".
*@li "y" has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ScatterNd.
*/
REG_OP(ScatterNd)
    .INPUT(indices, TensorType::IndexNumberType())
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(ScatterNd)
/**
*@brief Creates a new tensor by applying sparse updates "x" to individual
* values or slices within a tensor (initially zero for numeric, empty for
* string) of the given "shape" according to "indices". \n
*@par Inputs:
*Two inputs, including:
* @li indices: A required index tensor. Must be one of the following types:
* int32 or int64. format:ND.
* @li x: A required slice tensor. Must be one of the following types:
* float16, float, int32, int8, uint8. format:ND.
*@par Attributes:
* shape: A required list of int32 or int64, specifying the output shape.
*@par Outputs:
*y: A Tensor. Has the same type as "x". format:ND. \n
*@attention Constraints:
*@li "y" has the same shape as "shape".
*@li "y" has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ScatterNd.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ScatterNd instead.
*/
REG_OP(ScatterNdD)
    .INPUT(indices, TensorType::IndexNumberType())
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(ScatterNdD)
/**
* @brief Says whether the targets are in the top "k" predictions. \n
* @par Inputs:
* Two inputs, including:
* @li x1: A 2D Tensor of type float32. A "batch_size * classes" tensor.
* @li x2: A 1D Tensor of type int32. A batch_size tensor of class ids. \n
* @par Attributes:
* k: A required int, specifying the number of top elements to
* look at for computing precision. \n
* @par Outputs:
* y: A Tensor of type bool. \n
* @attention Constraints:
* @li x2 must be a non-negative tensor.
* @see InTopK()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator InTopK.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use InTopK instead.
*/
REG_OP(InTopKD)
    .INPUT(x1, TensorType({DT_FLOAT}))
    .INPUT(x2, TensorType({IndexNumberType}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .REQUIRED_ATTR(k, Int)
    .OP_END_FACTORY_REG(InTopKD)
/**
* @brief Says whether the targets are in the top "k" predictions. \n
* @par Inputs:
* Three inputs, including:
* @li x1: A 2D Tensor of type float32. A "batch_size * classes" tensor.
* @li x2: A 1D Tensor of type IndexNumberType. A batch_size tensor of class ids.
* @li k: A 1D Tensor of the same type as "x2".
* Specifies the number of top elements to look at for computing precision. \n
* @par Outputs:
* y: A Tensor of type bool. \n
* @attention Constraints:
* x2 must be a non-negative tensor.
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator InTopKV2.
*/
REG_OP(InTopK)
    .INPUT(x1, TensorType({DT_FLOAT}))
    .INPUT(x2, TensorType(IndexNumberType))
    .INPUT(k, TensorType({IndexNumberType}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .OP_END_FACTORY_REG(InTopK)
/**
* @brief Assigns "input_value" to the sliced l-value reference of "var".
* The values of "input_value" are assigned to the positions in the variable
* "var" that are selected by the slice parameters. The slice parameters
* "begin", "end", "strides", etc. work exactly as in "StridedSlice". \n
* @par Inputs:
* Five inputs, including:
* @li var: A mutable ND Tensor of type BasicType.
* @li begin: A mutable ND Tensor of type IndexNumberType.
* Specifies the index of the first value to select.
* @li end: A mutable ND Tensor of type IndexNumberType.
* Specifies the index of the last value to select.
* @li strides: A mutable ND Tensor of type IndexNumberType.
* Specifies the stride to select.
* @li input_value: A mutable ND Tensor of type BasicType. \n
* @par Attributes:
* @li begin_mask: An optional int. Defaults to "0".
* @li end_mask: An optional int. Defaults to "0".
* @li ellipsis_mask: An optional int. Defaults to "0".
* @li new_axis_mask: An optional int. Defaults to "0".
* @li shrink_axis_mask: An optional int. Defaults to "0". \n
* @par Outputs:
* var: A mutable Tensor. Has the same type as input "var". \n
* @attention Constraints:
* This operator currently does not support broadcasting. Therefore, the shape
* of "input_value" must be exactly the shape produced by the slice of "var". \n
* @see StridedSlice()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator StridedSlice.
*/
REG_OP(StridedSliceAssign)
    .INPUT(var, TensorType(BasicType))
    .INPUT(begin, TensorType(IndexNumberType))
    .INPUT(end, TensorType(IndexNumberType))
    .INPUT(strides, TensorType(IndexNumberType))
    .INPUT(input_value, TensorType(BasicType))
    .OUTPUT(var, TensorType(BasicType))
    .ATTR(begin_mask, Int, 0)
    .ATTR(end_mask, Int, 0)
    .ATTR(ellipsis_mask, Int, 0)
    .ATTR(new_axis_mask, Int, 0)
    .ATTR(shrink_axis_mask, Int, 0)
    .OP_END_FACTORY_REG(StridedSliceAssign)
/**
* @brief Assigns "input_value" to the sliced l-value reference of "var".
* The values of "input_value" are assigned to the positions in the variable
* "var" that are selected by the slice parameters. The slice parameters
* "begin", "end", "strides", etc. work exactly as in "StridedSlice". \n
* @par Inputs:
* Two inputs, including:
* @li var: A mutable ND Tensor of the following types: int32, int16, float16, float32.
* @li input_value: A mutable ND Tensor of the following types: int32, int16, float16, float32. \n
* @par Attributes:
* @li begin: A required list of ints.
* Specifies the index of the first value to select.
* @li end: A required list of ints.
* Specifies the index of the last value to select.
* @li strides: A required list of ints. Specifies the stride to select.
* @li begin_mask: An optional int. Defaults to "0".
* @li end_mask: An optional int. Defaults to "0".
* @li ellipsis_mask: An optional int. Defaults to "0".
* @li new_axis_mask: An optional int. Defaults to "0".
* @li shrink_axis_mask: An optional int. Defaults to "0". \n
* @par Outputs:
* var: A mutable Tensor. Has the same type as input "var". \n
* @attention Constraints:
* This operator currently does not support broadcasting. Therefore, the shape
* of "input_value" must be exactly the shape produced by the slice of "var". \n
* @see StridedSlice()
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use StridedSliceAssign instead.
*/
REG_OP(StridedSliceAssignD)
    .INPUT(var, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
    .INPUT(input_value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
    .OUTPUT(var, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT16}))
    .REQUIRED_ATTR(begin, ListInt)
    .REQUIRED_ATTR(end, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(begin_mask, Int, 0)
    .ATTR(end_mask, Int, 0)
    .ATTR(ellipsis_mask, Int, 0)
    .ATTR(new_axis_mask, Int, 0)
    .ATTR(shrink_axis_mask, Int, 0)
    .OP_END_FACTORY_REG(StridedSliceAssignD)
/**
*@brief Gathers slices from "x" according to "indices". "indices" must be
* an integer tensor of any dimension (usually 0-D or 1-D).
* Produces an output tensor with shape "indices.shape + x.shape[1:]". \n
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* int64, qint8, quint8, qint32, qint16, quint16, uint16,
* float16, uint32, uint64, complex64, complex128.
* @li indices: A Tensor of type int32 or int64.
*@par Attributes:
* @li validate_indices: An optional bool. Defaults to true. Specifies whether
* to verify the values of "indices".
* @li batch_dims: An optional int. Defaults to 0.
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@attention Constraints:
* "indices" is in the range [0, x.shape[0]).
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Gather.
*/
REG_OP(Gather)
    .INPUT(x, TensorType::BasicType())
    .INPUT(indices, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(validate_indices, Bool, true)
    .ATTR(batch_dims, Int, 0)
    .OP_END_FACTORY_REG(Gather)
/**
*@brief Computes the cumulative product of the tensor "x" along "axis". \n
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64.
*@li axis: A Tensor of type int32 or int64. Range is [-rank(x), rank(x)).
*
*@par Attributes:
*@li exclusive: A bool. Defaults to "False". If "False", performs inclusive
* cumprod, which means that the first element of the input is identical to the
* first element of the output. If "True", performs exclusive cumprod.
*@li reverse: A bool. Defaults to "False".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Cumprod.
*/
REG_OP(Cumprod)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(Cumprod)
/**
*@brief Computes the cumulative product of the tensor "x" along "axis" . \n
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64
*
*@par Attributes:
*@li axis: A required int. Range is [-rank(x),rank(x)). No default value.
*@li exclusive: If "False", performs inclusive cumprod, which means that the first element of the input
* is identical to the first element of the output. If "True", performs exclusive cumprod.
*@li reverse: A bool. Defaults to "False".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Cumprod.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Cumprod instead.
*/
REG_OP(CumprodD)
    .INPUT(x, TensorType::NumberType())
    .OUTPUT(y, TensorType::NumberType())
    .REQUIRED_ATTR(axis, Int)
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(CumprodD)
/**
*@brief Computes the cumulative sum of the tensor "x" along "axis" . \n
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64.
*@li axis: A Tensor of type int32 or int64. Range is [-rank(x),rank(x)). Defaults to "0".
*
*@par Attributes:
*@li exclusive: If "False", performs inclusive cumsum, which means that the first element of the input is
* identical to the first element of the output. If "True", performs exclusive cumsum.
*@li reverse: A bool. Defaults to "False".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Cumsum.
*/
REG_OP(Cumsum)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axis, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(Cumsum)
/**
*@brief Computes the cumulative sum of the tensor "x" along "axis".
*
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, uint16, complex128, float16, uint32, uint64.
*
*@par Attributes:
*@li axis: A required int. Range is [-rank(x),rank(x)). No default value.
*@li exclusive: If "False", performs inclusive cumsum, which means that the first element of the input is
* identical to the first element of the output. If "True", performs exclusive cumsum.
*@li reverse: A bool. Defaults to "False".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Cumsum.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Cumsum instead.
*/
REG_OP(CumsumD)
    .INPUT(x, TensorType::NumberType())
    .OUTPUT(y, TensorType::NumberType())
    .REQUIRED_ATTR(axis, Int)
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(CumsumD)
/**
*@brief Updates specified rows with values in v.
*Computes x[i, :] = v; return x.
*@par Inputs:
*Three inputs, including:
* @li x: A Tensor.
* TensorType::BasicType().
* @li indices: A vector of type int32.
* Indices into the left-most dimension of "x".
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as x except the first dimension,
* which must be the same as the size of "indices" . \n
*@par Outputs:
*y: A Tensor of the same type as "x".
* An alias of "x". The content of "y" is undefined if there are duplicates in indices.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceUpdate.
*/
REG_OP(InplaceUpdate)
    .INPUT(x, TensorType::BasicType())
    .INPUT(indices, TensorType({DT_INT32}))
    .INPUT(v, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(InplaceUpdate)
/**
*@brief Updates specified rows with values in v.
*Computes x[i, :] = v; return x.
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor of type int32, float16, float32.
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as "x" except the first dimension, which must be the same as the size of "indices" . \n
*@par Attributes:
*indices: A required list of ints. Indices into the left-most dimension of "x" . \n
*@par Outputs:
*y: A Tensor of the same type as "x".
* An alias of "x". The content of "y" is undefined if there are duplicates in indices . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceUpdate.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceUpdate instead.
*/
REG_OP(InplaceUpdateD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .INPUT(v, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .REQUIRED_ATTR(indices, ListInt)
    .OP_END_FACTORY_REG(InplaceUpdateD)
/**
*@brief Adds "v" into specified rows of "x".
*Computes y = x; y[i, :] += v.
*@par Inputs:
*Three inputs, including:
* @li x: A Tensor.
* TensorType::BasicType().
* @li indices: A vector of type int32.
* Indices into the left-most dimension of "x".
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as x except the first dimension,
* which must be the same as the size of "indices" . \n
*@par Outputs:
*y: A Tensor of the same type as "x".
* An alias of "x". The content of "y" is undefined if there are duplicates in indices.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceAdd.
*/
REG_OP(InplaceAdd)
    .INPUT(x, TensorType::BasicType())
    .INPUT(indices, TensorType({DT_INT32}))
    .INPUT(v, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(InplaceAdd)
/**
*@brief Adds "v" into specified rows of "x".
*Computes y = x; y[i, :] += v.
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor of type int32, float16, float32.
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as "x" except the first dimension, which must be the same as the size of "indices" . \n
*@par Attributes:
*indices: A required list of ints. Indices into the left-most dimension of "x" . \n
*@par Outputs:
*y: A Tensor of the same type as "x".
* An alias of "x". The content of "y" is undefined if there are duplicates in indices . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceAdd.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceAdd instead.
*/
REG_OP(InplaceAddD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .INPUT(v, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .REQUIRED_ATTR(indices, ListInt)
    .OP_END_FACTORY_REG(InplaceAddD)
/**
*@brief Subtracts "v" into specified rows of "x".
*Computes y = x; y[i, :] -= v; return y.
*@par Inputs:
**Three inputs, including:
* @li x: A Tensor. TensorType::BasicType().
* @li indices: A vector of type int32. Indices into the left-most dimension of x.
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as "x" except the first dimension, which must be the same as the size of "indices" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x".
* An alias of "x". The content of "y" is undefined if there are duplicates in indices . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceSub.
*/
REG_OP(InplaceSub)
    .INPUT(x, TensorType::BasicType())
    .INPUT(indices, TensorType({DT_INT32}))
    .INPUT(v, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(InplaceSub)
/**
*@brief Subtracts "v" into specified rows of "x".
*Computes y = x; y[i, :] -= v . \n
*@par Inputs:
**Two inputs, including:
* @li x: A Tensor of type int32, float16, float32.
* @li v: A Tensor of the same type as "x".
* Same dimension sizes as "x" except the first dimension, which must be the same as the size of "indices" . \n
*@par Attributes:
*indices: A required list of ints. Indices into the left-most dimension of "x" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x".
* An alias of x. The content of y is undefined if there are duplicates in indices . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator InplaceSub.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use InplaceSub instead.
*/
REG_OP(InplaceSubD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .INPUT(v, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .REQUIRED_ATTR(indices, ListInt)
    .OP_END_FACTORY_REG(InplaceSubD)
/**
* @brief Applies sparse addition to input "x" using individual values or slices
* from "updates" according to "indices". The updates are non-aliasing: "x" is
* only modified in-place if no other operations will use it. Otherwise, a copy
* of "x" is made. This operation has a gradient with respect to both "x" and
* "updates" . \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor of type NumberType. A batch_size x classes tensor.
* @li indices: A Tensor of type IndexNumberType. Specifies the indices into "x".
* @li updates: A Tensor. Must have the same type as "x".
* Specifies the updated values to add to "x" . \n
* @par Outputs:
* y: A Tensor with the same shape as "x", containing values of "x" updated with
* "updates" . \n
* @see ScatterNd(),ScatterNdAdd()
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator ScatterNdNonAliasingAdd.
*/
REG_OP(ScatterNonAliasingAdd)
    .INPUT(x, TensorType::NumberType())
    .INPUT(indices, TensorType::IndexNumberType())
    .INPUT(updates, TensorType::NumberType())
    .OUTPUT(y, TensorType::NumberType())
    .OP_END_FACTORY_REG(ScatterNonAliasingAdd)
/**
* @brief Computes the minimum along segments of a tensor . \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor of type RealNumberType.
* @li segment_ids: A 1D Tensor of type IndexNumberType, whose shape is a prefix
* of "x.shape".
* @li num_segments: A Tensor of type IndexNumberType . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @attention Constraints:
* @li segment_ids must be a non-negative tensor.
* @see UnsortedSegmentSum(), UnsortedSegmentProd()
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator UnsortedSegmentMin.
*/
REG_OP(UnsortedSegmentMin)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(segment_ids, TensorType::IndexNumberType())
    .INPUT(num_segments, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(UnsortedSegmentMin)
/**
* @brief Computes the minimum along segments of a tensor . \n
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor of the following types: int32, int16, float16, float32.
* @li segment_ids: A 1D Tensor of type int32, whose shape is a prefix
* of "x.shape" . \n
* @par Attributes:
* num_segments: A required int32, specifying the number of distinct segment IDs . \n
* @par Outputs:
* y: A Tensor. Must have the same type as input "x" . \n
* @attention Constraints:
* @li segment_ids must be a non-negative tensor.
* @see UnsortedSegmentProdD(), UnsortedSegmentSumD()
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentMin instead.
*/
REG_OP(UnsortedSegmentMinD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .INPUT(segment_ids, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .REQUIRED_ATTR(num_segments, Int)
    .OP_END_FACTORY_REG(UnsortedSegmentMinD)
/**
* @brief Computes the maximum along segments of a tensor . \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor of type RealNumberType.
* @li segment_ids: A 1D Tensor of type IndexNumberType, whose shape is a prefix
* of "x.shape".
* @li num_segments: A Tensor of type IndexNumberType . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @attention Constraints:
* segment_ids must be a non-negative tensor.
* @see UnsortedSegmentSum(), UnsortedSegmentProd()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator UnsortedSegmentMax.
*/
REG_OP(UnsortedSegmentMax)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(segment_ids, TensorType::IndexNumberType())
    .INPUT(num_segments, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(UnsortedSegmentMax)
/**
* @brief Computes the maximum along segments of a tensor . \n
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor of the following types: int32, int16, float16, float32.
* @li segment_ids: A 1D Tensor of type int32, whose shape is a prefix
* of "x.shape" . \n
* @par Attributes:
* num_segments: A required int32, specifying the number of distinct segment IDs . \n
* @par Outputs:
* y: A Tensor. Must have the same type as input "x" . \n
* @attention Constraints:
* @li segment_ids must be a non-negative tensor.
* @see UnsortedSegmentProdD()
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentMax instead.
*/
REG_OP(UnsortedSegmentMaxD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .INPUT(segment_ids, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .REQUIRED_ATTR(num_segments, Int)
    .OP_END_FACTORY_REG(UnsortedSegmentMaxD)
/**
* @brief Computes the product along segments of a tensor . \n
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor of type NumberType.
* @li segment_ids: A 1D Tensor of type IndexNumberType, whose shape is a prefix
* of "x.shape".
* @li num_segments: A Tensor of type IndexNumberType . \n
* @par Outputs:
* y: A Tensor of type NumberType . \n
* @attention Constraints:
* @li segment_ids must be a non-negative tensor.
* @see UnsortedSegmentSum(), UnsortedSegmentMin()
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator UnsortedSegmentProd.
*/
REG_OP(UnsortedSegmentProd)
    .INPUT(x, TensorType::NumberType())
    .INPUT(segment_ids, TensorType::IndexNumberType())
    .INPUT(num_segments, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .OP_END_FACTORY_REG(UnsortedSegmentProd)
/**
* @brief Computes the product along segments of a tensor . \n
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor of the following types: int32, int16, float16, float32.
* @li segment_ids: A 1D Tensor of type int32, whose shape is a prefix
* of "x.shape" . \n
* @par Attributes:
* num_segments: A required int32, specifying the number of distinct segment IDs . \n
* @par Outputs:
* y: A Tensor. Must have the same type as input "x" . \n
* @attention Constraints:
* @li segment_ids must be a non-negative tensor.
* @see UnsortedSegmentMinD()
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use UnsortedSegmentProd instead.
*/
REG_OP(UnsortedSegmentProdD)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .INPUT(segment_ids, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT16}))
    .REQUIRED_ATTR(num_segments, Int)
    .OP_END_FACTORY_REG(UnsortedSegmentProdD)
/**
*@brief Performs object detection . \n
*@par Inputs:
*@li cls_prob: An NCHW tensor of type float16 or float32, specifying the probability of the proposal is the background class.
*@li bbox_delta: An NCHW tensor of type float16 or float32, specifying the coordinates of the proposals bounding boxes.
*@li im_info: An ND tensor of type float16 or float32, specifying the Image information . \n
*@par Attributes:
*@li feat_stride: An optional float32, specifying the stride of the sliding window. Must be greater than "0". Defaults to "16".
*@li base_size: An optional float32, specifying the size of the generated base box. Must be greater than "0". Defaults to "16".
*@li min_size: An optional float32, specifying the minimum edge length of a proposal. A box with any edge less than this value is removed. Must be greater than "0". Defaults to "16".
*@li ratio: An optional list of floats, specifying the aspect ratio of the generated base box. Defaults to [0.5, 1, 2].
*@li scale: An optional list of floats, specifying the ratio of the size of the generated base box to "base_size". Defaults to [8, 16, 32].
*@li pre_nms_topn: An optional int, specifying top K boxes before NMS. For float16 input, pre_nms_topn <= 6000. For float32 input, pre_nms_topn <= 3000. Defaults to "3000".
*@li post_nms_topn: An optional int, specifying the number of boxes to be output after NMS. The value is a multiple of 16. For float16 input, post_nms_topn <= 6000. For float32 input, post_nms_topn <= 3000 (the maximum multiple of 16 is 2992 within the range). Defaults to "304".
*@li iou_threshold: An optional float32, specifying the NMS threshold. The value range is (0,1]. Defaults to "0.7".
*@li output_actual_rois_num: An optional bool. Defaults to "false" . \n
*@par Outputs:
*@li rois: A Tensor with shape [batch, 5, post_nms_topn], of type float16 or float32, specifying the output box information. "post_nms_topn" must be a multiple of 16. The dimension "5" indicates (batchID, x1, y1, x2, y2). The number of BBoxes output per batch is determined by "actual_rois_num".
*@li actual_rois_num: A Tensor with shape [batch, 8], of type int32, specifying the number of BBoxes output per batch.
*@par Third-party framework compatibility
* It is a custom operator. It has no corresponding operator in Caffe.
*/
REG_OP(Proposal)
    .INPUT(cls_prob, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(bbox_delta, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(im_info, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(actual_rois_num, TensorType({DT_INT32}))
    .ATTR(feat_stride, Float, 16)
    .ATTR(base_size, Float, 16)
    .ATTR(min_size, Float, 16)
    .ATTR(ratio, ListFloat, {0.5, 1, 2})
    .ATTR(scale, ListFloat, {8, 16, 32})
    .ATTR(pre_nms_topn, Int, 3000)
    .ATTR(post_nms_topn, Int, 304)
    .ATTR(iou_threshold, Float, 0.7)
    .ATTR(output_actual_rois_num, Bool, false)
    .OP_END_FACTORY_REG(Proposal)
/**
*@brief Performs object detection. Different from Proposal, this is an internal API called after FE fusion and has an additional "rpn_bbox" attribute. The suffix "D" in the API name will be removed from the generated model . \n
*@par Inputs:
*@li cls_prob: An NCHW tensor of type float16, specifying the probability of the proposal is the background class.
*@li bbox_delta: An NCHW tensor of type float16, specifying the coordinates of the proposals bounding boxes.
*@li im_info: An ND tensor of type float16 or float32, specifying the Image information.
*@li rpn_bbox: An NCHW tensor of type float16, specifying the coordinates of the proposals bounding boxes . \n
*@par Attributes:
*@li feat_stride: An optional float32, specifying the stride of the sliding window. Must be greater than "0". Defaults to "16".
*@li base_size: An optional float32, specifying the size of the generated base box. Must be greater than "0". Defaults to "16".
*@li min_size: An optional float32, specifying the minimum edge length of a proposal. A box with any edge less than this value is removed. Must be greater than "0". Defaults to "16".
*@li ratio: An optional list of floats, specifying the aspect ratio of the generated base box. Defaults to [0.5, 1, 2].
*@li scale: An optional list of floats, specifying the ratio of the size of the generated base box to "base_size". Defaults to [8, 16, 32].
*@li pre_nms_topn: An optional int, specifying top K boxes before NMS. For float16 input, pre_nms_topn <= 6000. For float32 input, pre_nms_topn <= 3000. Defaults to "3000".
*@li post_nms_topn: An optional int, specifying the number of boxes to be output after NMS. The value is a multiple of 16. For float16 input, post_nms_topn <= 6000. For float32 input, post_nms_topn <= 3000 (the maximum multiple of 16 is 2992 within the range). Defaults to "304".
*@li iou_threshold: An optional float32, specifying the NMS threshold. The value range is (0,1]. Defaults to 0.7.
*@li output_actual_rois_num: An optional bool. Defaults to "false" . \n
*@par Outputs:
*@li rois: A Tensor with shape [batch, 5, post_nms_topn], of type float16 or float32, specifying the output box information. "post_nms_topn" must be a multiple of 16. The dimension "5" indicates (batchID, x1, y1, x2, y2). The number of BBoxes output per batch is determined by "actual_rois_num".
*@li actual_rois_num: A Tensor with shape [batch, 8], of type int32, specifying the number of BBoxes output per batch.
*@par Third-party framework compatibility
* It is a custom operator. It has no corresponding operator in Caffe.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Proposal instead.
*/
REG_OP(ProposalD)
    .INPUT(cls_prob, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(bbox_delta, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(im_info, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(rpn_bbox, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(rois, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(actual_rois_num, TensorType({DT_INT32}))
    .ATTR(feat_stride, Float, 16)
    .ATTR(base_size, Float, 16)
    .ATTR(min_size, Float, 16)
    .ATTR(ratio, ListFloat, {0.5, 1, 2})
    .ATTR(scale, ListFloat, {8, 16, 32})
    .ATTR(pre_nms_topn, Int, 3000)
    .ATTR(post_nms_topn, Int, 304)
    .ATTR(iou_threshold, Float, 0.7)
    .ATTR(output_actual_rois_num, Bool, false)
    .OP_END_FACTORY_REG(ProposalD)
/**
*@brief Performs plane or channel conversion on YoloV2.
* If reverse=true: (N, H, W, C)->(N, H*stride, W*stride, C/(stride*stride))
* If reverse=false: (N, H, W, C)->(N, H/stride, W/stride, C*(stride*stride))
*@par Inputs:
*x: An (N, H, W, C) tensor. Type is float16, float32, int8, uint8, int16, uint16, int32, uint32, int64 or uint64 . \n
*@par Attributes:
*@li stride: An optional int32, specifying the plane or channel scaling factor. Defaults to "2".
*@li reverse: An optional bool, specifying the conversion mode. If "true", depth to space conversion is performed. If "false", space to depth conversion is performed. Defaults to "false" . \n
*@par Outputs:
*y: An (N, H, W, C) tensor. Has the same type as "x" . \n
*@attention Constraints:
*@li If reverse=true: C/(stride*stride) yields an integer result. If reverse=false: W/stride and H/stride yield integer results.
*@par Third-party framework compatibility
* It is a custom operator. It has no corresponding operator in Caffe.
*/
REG_OP(PassThrough)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32, DT_INT64, DT_UINT64}))
    .ATTR(stride, Int, 2)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(PassThrough)
/**
*@brief Crops the input tensor x to the shape of size. For example:
*(1) x: bottom to be cropped, with shape (20, 50, 512, 512);
*(2) size: reference input for cropping, with shape (20, 10, 256, 256);
*(3) axis = 1;
*(4) offsets = (25, 128, 128);
*(5) y = x[:, 25:25 + size.shape[1], 128:128 + size.shape[2], 128:128 + size.shape[3]] . \n
*@par Inputs:
*Inputs include:
* @li x: A required Tensor. Must be one of the following types: float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64.
* @li size: A required Tensor. Must be one of the following types: float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64.
*@par Attributes:
*@li axis: An optional int32, specifying the first dimension to crop. Defaults to "2".
*@li offsets: A required list of ints, specifying the shift for all/each dimension to align the cropped bottom with the reference bottom.
*@par Outputs:
*y: A required Tensor. Has the same type and shape as "size" . \n
*@attention Constraints:
*@li "y" must have the same type and shape as "size". "x" must have the same type as "size".
*@li "axis" must be less than the rank of "x".
*@li The "offsets" for each dimension must not exceed the maximum value of the corresponding dimension of "x".
*@li The array length of "offsets" plus the value of "axis" equals to the rank of "y".
*@par Third-party framework compatibility
* Compatible with the Caffe operator Crop.
*/
REG_OP(Crop)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .INPUT(size, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .ATTR(axis, Int, 2)
    .REQUIRED_ATTR(offsets, ListInt)
    .OP_END_FACTORY_REG(Crop)
/**
*@brief Returns a namedtuple (values, indices) where values is the cumulative
* minimum of elements of input in the dimension dim.
* And indices is the index location of each minimum value found in the dimension dim. \n
*@par Inputs:
*One input, including:
* x: A tensor. Must be one of the following types:
* float16, float32, int32, uint32, int8, uint8. \n
*@par Attributes:
* axis: Axis along which to cummin. \n
*@par Outputs:
* @li y: A Tensor with the same type and shape of x's.
* @li indices: A Tensor with the same shape of x's.
* NOTE(review): registered as BasicType below, though documented elsewhere as int32 — confirm the expected index dtype. \n
*@par Third-party framework compatibility
*Compatible with the Pytorch operator Cummin. \n
*/
REG_OP(Cummin)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OUTPUT(indices, TensorType::BasicType())
    .REQUIRED_ATTR(axis, Int)
    .OP_END_FACTORY_REG(Cummin)
/**
*@brief Returns a namedtuple (values, indices) where values is the cumulative
* maximum of elements of input in the dimension dim.
* And indices is the index location of each maximum value found in the dimension dim. \n
*@par Inputs:
*One input, including:
* x: A tensor. Must be one of the following types:
* float16, float32, int32, uint32, int8, uint8. \n
*@par Attributes:
* dim: Axis along which to cummax. \n
*@par Outputs:
* @li y: A Tensor with the same type and shape of x's.
* @li indices: A Tensor with the same shape of x's.
* NOTE(review): registered as BasicType below, though documented elsewhere as int32/int64 — confirm the expected index dtype. \n
*@par Third-party framework compatibility
*Compatible with the Pytorch operator Cummax. \n
*/
REG_OP(Cummax)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OUTPUT(indices, TensorType::BasicType())
    .REQUIRED_ATTR(dim, Int)
    .OP_END_FACTORY_REG(Cummax)
/**
*@brief Extends the input with copies of data along a specified dimension. For example:
*(1) If x = [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12]]], with shape (2, 3, 2);
*(2) axis = 1;
*(3) tiles = 2;
*(4) Then, y = [[[1, 2], [3, 4], [5, 6], [1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [11, 12], [7, 8], [9, 10], [11, 12]]], with shape (2, 6, 2) . \n
*@par Inputs:
* One input:
*x: A Tensor with any format. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64 . \n
*@par Attributes:
*@li axis: An optional int32, specifying the axis to tile. Defaults to 1.
*@li tiles: A required int32, specifying the number of copies (tiles) to output . \n
*@par Outputs:
*y: A Tensor of any format. Must be one of the following types: float16, float32, int8, int16, int32, int64, uint8, uint16, uint32, uint64 . \n
*@attention Constraints:
*@li "axis" must be within the rank of the input tensor.
*@li "tiles" must be greater than 1.
*@par Third-party framework compatibility
* Compatible with the Caffe operator Tile.
*/
REG_OP(TileWithAxis)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT64, DT_INT32,
        DT_INT16, DT_INT8, DT_UINT64, DT_UINT32, DT_UINT16, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT64, DT_INT32,
        DT_INT16, DT_INT8, DT_UINT64, DT_UINT32, DT_UINT16, DT_UINT8}))
    .ATTR(axis, Int, 1)
    .REQUIRED_ATTR(tiles, Int)
    .OP_END_FACTORY_REG(TileWithAxis)
/**
*@brief Read data with offset and stride . \n
*@par Inputs:
*One input:
*x: A Tensor. Registered with TensorType::ALL(); the kernel presumably
* supports only float16 and int8 — confirm with the implementation. \n
*@par Attributes:
*stride_list: An optional 5D list of type int32. Defaults to "[1,1,1,1,1]" . \n
*@par Outputs:
*y: A Tensor of the same type as "x".
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ReadSelect)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(stride_list, ListInt, {1,1,1,1,1})
    .OP_END_FACTORY_REG(ReadSelect)
/**
*@brief Write data with offset . \n
*@par Inputs:
*x: A Tensor. Registered with TensorType::ALL(); the kernel presumably
* supports only int32, float32, float16 and int8 — confirm with the implementation. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(WriteSelect)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .OP_END_FACTORY_REG(WriteSelect)
/**
*@brief Read data by stride.
*@par Inputs:
*x: A Tensor. Registered with TensorType::ALL(); the kernel presumably
* supports only float16 and int8 — confirm with the implementation. \n
*@par Attributes:
*@li axis: An optional int32, the index of the axis to read by stride. Defaults to 1. \n
*@li stride: An optional int32, the reading stride. Defaults to 1. \n
*@par Outputs:
*y: A Tensor of the same type as "x".
*/
REG_OP(StridedRead)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(axis, Int, 1)
    .ATTR(stride, Int, 1)
    .OP_END_FACTORY_REG(StridedRead)
/**
*@brief Write data by stride.
*@par Inputs:
*x: A Tensor. Registered with TensorType::ALL(); the kernel presumably
* supports only float16 and int8 — confirm with the implementation. \n
*@par Attributes:
*@li axis: An optional int32, the index of the axis to write by stride. Defaults to 1. \n
*@li stride: An optional int32, the writing stride. Defaults to 1. \n
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*/
REG_OP(StridedWrite)
    .INPUT(x, TensorType::ALL())
    .OUTPUT(y, TensorType::ALL())
    .ATTR(axis, Int, 1)
    .ATTR(stride, Int, 1)
    .OP_END_FACTORY_REG(StridedWrite)
/**
*@brief Computes the cumulative log-sum-exp of the tensor "x" along "axis" . \n
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types: double, float32, float16.
*@li axis: A Tensor of type int32 or int16. The dimension to accumulate along.
*
*@par Attributes:
*@li exclusive: A bool. Defaults to "False". If "False", performs inclusive CumulativeLogsumexp, which means that the first element of the input is identical to the first element of the output. If "True", performs exclusive CumulativeLogsumexp.
*@li reverse: A bool. Defaults to "False". If "True", presumably accumulates in the reverse direction — confirm with the kernel.
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator CumulativeLogsumexp.
*/
REG_OP(CumulativeLogsumexp)
    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .INPUT(axis, TensorType({DT_INT32, DT_INT16}))
    .OUTPUT(y, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(CumulativeLogsumexp)
/**
*@brief Computes the cumulative log-sum-exp of the tensor "x" along "axis".
*
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: double, float32, float16.
*
*@par Attributes:
*@li axis: A required int, the dimension to accumulate along.
*@li exclusive: A bool. Defaults to "False". If "False", performs inclusive CumulativeLogsumexp, which means that the first element of the input is identical to the first element of the output. If "True", performs exclusive CumulativeLogsumexp.
*@li reverse: A bool. Defaults to "False". If "True", presumably accumulates in the reverse direction — confirm with the kernel.
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator CumulativeLogsumexp.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use CumulativeLogsumexp instead.
*/
REG_OP(CumulativeLogsumexpD)
    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(axis, Int)
    .ATTR(exclusive, Bool, false)
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(CumulativeLogsumexpD)
  1782. /**
  1783. * @brief Add updates to var according to axis and indices.
  1784. * @par Inputs:
  1785. * Three inputs, including:
  1786. * @li var: A Tensor. Must be one of the following types:
  1787. * float16, float32, int16, int32, int8, uint8.
  1788. * @li indices: A Tensor of the indices, type should be int32.
  1789. * @li updates: A Tensor of the same type as "var". \n
  1790. * @par Attributes:
  1791. * axis: An required int to specify the axis to perform indices add. \n
  1792. * @par Outputs:
  1793. * var: A Tensor. Same as input "var".
  1794. * @par Third-party framework compatibility
  1795. * Compatible with the Pytorch operator index_add_.
  1796. */
  1797. REG_OP(InplaceIndexAdd)
  1798. .INPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
  1799. DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  1800. .INPUT(indices, TensorType({DT_INT32}))
  1801. .INPUT(updates, TensorType({DT_INT16, DT_INT32, DT_INT8,
  1802. DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  1803. .OUTPUT(var, TensorType({DT_INT16, DT_INT32, DT_INT8,
  1804. DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  1805. .REQUIRED_ATTR(axis, Int)
  1806. .OP_END_FACTORY_REG(InplaceIndexAdd)
/**
* @brief Replace elements of "x" with "value" where "mask" is true.
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, int8, int32, int64.
* @li mask: A Tensor of type bool.
* @li value: A Tensor. Must be one of the following types: float16, float32, int8, int32, int64.
* @par Outputs:
* y: A Tensor. Must be one of the following types:
* float16, float32, int64, int32, int8.
*/
REG_OP(MaskedFill)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
    .INPUT(mask, TensorType({DT_BOOL}))
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(MaskedFill)
/**
* @brief Choose elements of "x" according to "mask".
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor of type float16 or float32.
* @li mask: A Tensor of type bool. \n
* @par Outputs:
* y: A tensor with the same type as "x". \n
* @par Third-party framework compatibility
* Compatible with the Numpy operator select.
* Replaces the pytorch operator masked_select in some scenarios.\n
*/
REG_OP(MaskedSelectV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(mask, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(MaskedSelectV2)
/**
* @brief Update elements of "x" with values from "updates" where "mask" is true.
* @par Inputs:
* Three inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, float64, int64, int32, int16, int8, uint8.
* @li mask: A Tensor of type bool.
* @li updates: A tensor with the same type as "x". \n
* @par Outputs:
* y: A tensor with the same type as "x". \n
*/
REG_OP(MaskedScatter)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .INPUT(mask, TensorType({DT_BOOL}))
    .INPUT(updates, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_UINT8, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .OP_END_FACTORY_REG(MaskedScatter)
/**
* @brief Slice a tensor at its last dim, e.g. a[..., begin:end:stride]. \n
* @par Inputs:
* One input, including:
* x: A Tensor. Must be one of the following types:
* float16, float32, double, int8, int16, int32, int64.
* @par Attributes:
* @li start: A required int, start index of the last dim. \n
* @li end: A required int, end index of the last dim. \n
* @li stride: An optional int, stride of the slice. Defaults to 1. \n
* @par Outputs:
* y: A Tensor. Has the same type as "x". \n
* @par Third-party framework compatibility
* No compatibility
*/
REG_OP(SliceLastDim)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(start, Int)
    .REQUIRED_ATTR(end, Int)
    .ATTR(stride, Int, 1)
    .OP_END_FACTORY_REG(SliceLastDim)
/**
* @brief Extracts a strided slice of a tensor. Roughly speaking, this op
* extracts a slice of size (end-begin)/stride from the given input tensor.
* Starting at the location specified by begin the slice continues by
* adding stride to the index until all dimensions are not less than end. \n
*
* @par Inputs:
* Five inputs (the last two optional), including:
* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, float16, uint32, uint64.
* @li begin: A Tensor of type int32 or int64, for the index of the first value to select.
* @li end: A Tensor of type int32 or int64, for the index of the last value to select.
* @li axes: An optional Tensor of type int32 or int64, indicating the axes to select.
* @li strides: An optional Tensor of type int32 or int64, for the increment. \n
*
* @par Attributes:
* @li begin_mask: An optional int. Defaults to 0.
* A bitmask where a bit "i" being "1" means to ignore the begin
* value and instead use the largest interval possible.
* @li end_mask: An optional int. Defaults to 0.
* Analogous to "begin_mask".
* @li ellipsis_mask: An optional int. Defaults to 0.
* A bitmask where bit "i" being "1" means the "i"th position
* is actually an ellipsis.
* @li new_axis_mask: An optional int. Defaults to 0.
* A bitmask where bit "i" being "1" means the "i"th
* specification creates a new shape 1 dimension.
* @li shrink_axis_mask: An optional int. Defaults to 0.
* A bitmask where bit "i" implies that the "i"th
* specification should shrink the dimensionality. \n
*
* @par Outputs:
* y: A Tensor. Has the same type as "x".
*
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator StridedSliceV2.
*/
REG_OP(StridedSliceV2)
    .INPUT(x, TensorType::BasicType())
    .INPUT(begin, TensorType::IndexNumberType())
    .INPUT(end, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(axes, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(strides, TensorType::IndexNumberType())
    .ATTR(begin_mask, Int, 0)
    .ATTR(end_mask, Int, 0)
    .ATTR(ellipsis_mask, Int, 0)
    .ATTR(new_axis_mask, Int, 0)
    .ATTR(shrink_axis_mask, Int, 0)
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(StridedSliceV2)
/**
*@brief Fills the elements of the input tensor with a value by selecting the indices in the order given in index. \n
*@par Inputs:
*Three inputs, including:
* @li x: A tensor. Must be one of the following types:
* float16, float32, int32. \n
*@li assist1: A tensor. Must be one of the following types:
* float16, float32, int32. \n
*@li assist2: A tensor. Must be one of the following types:
* float16, float32, int32. \n
* @par Attributes:
* dim: A required int. Used to select the dimension of this tensor. \n
*@par Outputs:
*y: A Tensor with the same type and shape as "x". \n
*@par Third-party framework compatibility
*Compatible with the Pytorch operator IndexFill. \n
*/
REG_OP(IndexFillD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(assist1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(assist2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .REQUIRED_ATTR(dim, Int)
    .OP_END_FACTORY_REG(IndexFillD)
/**
* @brief For each row r of this and for each column c, do (*this)(r, c) += src(j, c), \n
* where j ranges from indexes[r].first through indexes[r].second - 1. \n
* In general indexes must be >= 0 and < src.NumRows(); \n
* but to represent an empty range you may use the pair (-1, -1) or any pair of numbers (i, j) such that i >= j. \n
* @par Inputs:
* Three inputs, in registration order:
* @li x: A Tensor. Must be one of the following types:
* float16, float32.
* @li src: A Tensor of the same type as "x".
* @li indices: A Tensor of the row ranges, type should be int32. \n
* @par Outputs:
* x: A Tensor. Same as input "x" (updated in place).
* @par Third-party framework compatibility
* Compatible with the kaldi operator AddRowRanges.
*/
REG_OP(AddRowRanges)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(src, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(indices, TensorType({DT_INT32}))
    .OUTPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OP_END_FACTORY_REG(AddRowRanges)
/**
*@brief Masked fill of a tensor along one axis by range.
* It is a customized masked fill range operator . \n
*@par Inputs:
* Four inputs, including:
*@li x: input tensor. A ND Tensor of float32/float16/int32/int8 with shapes
* 1-D (D,), 2-D(N, D), 3-D(N, C, D)
*@li start: masked fill start pos. A 3D Tensor of int32 with
* shape (num, N). "num" indicates the number of loop masked fill, and the value N
* indicates the batch of ND Tensor, if input x shape is 1-D, N = 1. \n
*@li end: masked fill end pos. A 3D Tensor of int32 with
* shape (num, N). "num" indicates the number of loop masked fill, and the value N
* indicates the batch of ND Tensor. \n
*@li value: masked fill value. A 2D Tensor of float32/float16/int32/int8 with
* shape (num,). "num" indicates the number of loop masked fill
*@par Attributes:
*@li axis: A required int, the axis along which the masked fill is applied.
*@par Outputs:
*y: A ND Tensor of float32/float16/int32/int8 with shapes 1-D (D,), 2-D(N, D), 3-D(N, C, D)
* @par Restrictions:
* Warning: input shape's length must not be bigger than 1024 * 1024 * 1024.
*/
REG_OP(MaskedFillRange)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32}))
    .INPUT(start, TensorType({DT_INT32}))
    .INPUT(end, TensorType({DT_INT32}))
    .INPUT(value, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT32}))
    .REQUIRED_ATTR(axis, Int)
    .OP_END_FACTORY_REG(MaskedFillRange)
/**
* @brief After a set of sorted data and a new set of data are re-sorted, get the first k data. \n
*
* @par Inputs:
* Six inputs, including:
* @li topk_pq_distance: A sorted Tensor, updated in place after calculation. Must be one of the following types: float32, float16.
* @li topk_pq_index: A Tensor of type int32, index corresponding to topk_pq_distance.
* @li topk_pq_ivf: A Tensor of type int32 , the bucket number corresponding to topk_pq_distance.
* @li pq_distance: A Tensor of type float32 or float16, the new data set will be reordered with topk_pq_distance and updated to topk_pq_distance.
* @li pq_index: A Tensor of type int32, index corresponding to pq_distance.
* @li pq_ivf: A scalar of type int32 , the bucket number corresponding to pq_distance. \n
*
* @par Attributes:
* order: An optional string, the sorting direction of topk_pq_distance. Defaults to "asc". \n
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(InplaceTopKDistance)
    .INPUT(topk_pq_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(topk_pq_index, TensorType({DT_INT32}))
    .INPUT(topk_pq_ivf, TensorType({DT_INT32}))
    .INPUT(pq_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(pq_index, TensorType({DT_INT32}))
    .INPUT(pq_ivf, TensorType({DT_INT32}))
    .ATTR(order, String, "asc")
    .OP_END_FACTORY_REG(InplaceTopKDistance)
/**
* @brief After a set of sorted data and a new set of data are re-sorted, get the first k data. \n
*
* @par Inputs:
* Three inputs, including:
* @li sorted_distance: A sorted Tensor of type float16.
* @li pq_ivf: A Tensor of type int32, the bucket number corresponding to sorted_distance.
* @li pq_index: A Tensor of type int32, index corresponding to sorted_distance. \n
*
*@par Outputs:
* @li topk_distance: A Tensor of type float16, the first k entries of sorted_distance.
* @li topk_ivf: A Tensor of type int32, the bucket number corresponding to topk_distance.
* @li topk_index: A Tensor of type int32, index corresponding to topk_distance. \n
*
* @par Attributes:
* k: A required int, the number of leading entries of sorted_distance to output. \n
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(TopKPQDistanceMerge)
    .INPUT(sorted_distance, TensorType({DT_FLOAT16}))
    .INPUT(pq_ivf, TensorType({DT_INT32}))
    .INPUT(pq_index, TensorType({DT_INT32}))
    .OUTPUT(topk_distance, TensorType({DT_FLOAT16}))
    .OUTPUT(topk_ivf, TensorType({DT_INT32}))
    .OUTPUT(topk_index, TensorType({DT_INT32}))
    .REQUIRED_ATTR(k, Int)
    .OP_END_FACTORY_REG(TopKPQDistanceMerge)
/**
*@brief Extracts a strided slice of a tensor. Roughly speaking, this op
extracts a slice of size (end-begin)/stride from the given input tensor.
Starting at the location specified by begin the slice continues by
adding stride to the index until all dimensions are not less than end.
*@par Inputs:
*Five inputs (the last two optional), in registration order:
* @li x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, float16, uint32, uint64.
* @li begin: A Tensor of type int32 or int64, for the index of the first value to select . \n
* @li end: A Tensor of type int32 or int64, for the index of the last value to select . \n
* @li axes: An optional Tensor of type int32 or int64, indicating the axes to select . \n
* @li strides: An optional Tensor of type int32 or int64, for the increment . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(StridedSliceV3)
    .INPUT(x, TensorType::BasicType())
    .INPUT(begin, TensorType::IndexNumberType())
    .INPUT(end, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(axes, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(strides, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(StridedSliceV3)
  2089. } // namespace ge
  2090. #endif // OPS_BUILT_IN_OP_PROTO_INC_SELECTION_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示