matrix_calculation_ops.h

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file matrix_calculation_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
  24. /**
  25. *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
  26. *@par Inputs:
  27. *Three inputs, including:
  28. * @li x1: A matrix Tensor. 2D. Must be one of the following types: float16,
  29. * float32, int32. Has format [ND, NHWC, FRACTAL_NZ].
  30. * @li x2: A matrix Tensor. 2D. Must be one of the following types: float16,
  31. * float32, int32. Has format [ND, NHWC, FRACTAL_NZ].
  32. * @li bias: An optional 1D Tensor. Must be one of the following types: float16,
  33. * float32, int32. Has format [ND, NHWC] . \n
  34. *@par Attributes:
  35. *@li transpose_x1: A bool. If True, changes the shape of "x1" from [M, K] to [K, M].
  36. *@li transpose_x2: A bool. If True, changes the shape of "x2" from [M, K] to [K, M] . \n
  37. *@par Outputs:
  38. *y: The result matrix Tensor. 2D. Must be one of the following types: float16,
  39. * float32, int32. Has format [ND, NHWC, FRACTAL_NZ] . \n
  40. *@par Third-party framework compatibility
  41. * Compatible with the TensorFlow operator MatMul.
  42. */
  43. REG_OP(MatMul)
  44. .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  45. .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  46. .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  47. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  48. .ATTR(transpose_x1, Bool, false)
  49. .ATTR(transpose_x2, Bool, false)
  50. .OP_END_FACTORY_REG(MatMul)
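/* Usage sketch (illustrative only, not part of the registration): the REG_OP
 * block above generates a ge::op::MatMul class for graph construction whose
 * setters follow the set_input_<name> / set_attr_<name> pattern of the GE
 * graph-construction API; the exact generated names used here are assumptions.
 *
 *   ge::op::MatMul matmul("matmul");
 *   matmul.set_input_x1(x1_data)             // 2D tensor of shape [M, K]
 *         .set_input_x2(x2_data)             // 2D tensor of shape [K, N]
 *         .set_attr_transpose_x1(false)
 *         .set_attr_transpose_x2(false);     // output y has shape [M, N]
 */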
  51. /**
  52. *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
  53. *@par Inputs:
  54. *Four inputs, including:
  55. * @li x1: A matrix Tensor. 2D. Must be one of the following types: float32,
  56. float16, int32, int8. Has format [ND, NHWC, FRACTAL_NZ].
  57. * @li x2: A matrix Tensor. 2D. Must be one of the following types: float32,
  58. float16, int32, int8. Has format [ND, NHWC, FRACTAL_NZ].
  59. * @li bias: An optional 1D Tensor. Must be one of the following types: float32,
  60. float16, int32. Has format [ND, NHWC].
  61. * @li offset_w: An optional 1D Tensor for quantized inference. Type is int8.
  62. Reserved. \n
  63. *@par Attributes:
  64. * @li transpose_x1: A bool. If True, changes the shape of "x1" from [K, M] to
  65. [M, K].
  66. * @li transpose_x2: A bool. If True, changes the shape of "x2" from [N, K] to
  67. [K, N].
  68. * @li offset_x: An optional integer for quantized MatMulV2.
  69. * The negative offset added to the input x1 for int8 type. Ensure offset_x
  70. within the effective range of int8 [-128, 127]. Defaults to "0". \n
  71. *@par Outputs:
  72. *y: The result matrix Tensor. 2D. Must be one of the following types: float32,
  73. float16, int32. Has format [ND, NHWC, FRACTAL_NZ]. \n
  74. *@par Third-party framework compatibility
  75. * Compatible with the TensorFlow operator MatMul.
  76. */
  77. REG_OP(MatMulV2)
  78. .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
  79. .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
  80. .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  81. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  82. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
  83. .ATTR(transpose_x1, Bool, false)
  84. .ATTR(transpose_x2, Bool, false)
  85. .ATTR(offset_x, Int, 0)
  86. .OP_END_FACTORY_REG(MatMulV2)
  87. /**
  88. *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
  89. *@par Inputs:
  90. *Five inputs, including:
  91. * @li x1: A matrix Tensor. 2D. Must be one of the following types: int8.
  92. * @li x2: A matrix Tensor. 2D. Must be one of the following types: int8.
  93. * @li compress_index: A compress index matrix of type int8.
  94. * @li bias: An optional Tensor. 1D. Must be one of the following types: int32,
  95. float16.
  96. * @li offset_w: An optional matrix Tensor. 2D. Must be one of the following
  97. types: int8. \n
  98. *@par Attributes:
  99. *@li transpose_x1: A bool. If True, changes the shape of "x1" from [K, M] to
  100. [M, K].
  101. *@li transpose_x2: A bool. If True, changes the shape of "x2" from [N, K] to
  102. [K, N].
  103. *@li offset_x: An optional integer for quantized MatMulV2Compress.
  104. *The negative offset added to the input x1 for int8 type. Ensure offset_x
  105. within the effective range of int8 [-128, 127]. Defaults to "0". \n
  106. *@par Outputs:
  107. *y: The result matrix Tensor. 2D. Must be one of the following types: int32,
  108. * float16. \n
  109. */
  110. REG_OP(MatMulV2Compress)
  111. .INPUT(x1, TensorType({DT_INT8}))
  112. .INPUT(x2, TensorType({DT_INT8}))
  113. .INPUT(compress_index, TensorType({DT_INT8}))
  114. .OPTIONAL_INPUT(bias, TensorType({DT_INT32, DT_FLOAT16}))
  115. .OUTPUT(y, TensorType({DT_INT32, DT_FLOAT16}))
  116. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
  117. .ATTR(transpose_x1, Bool, false)
  118. .ATTR(transpose_x2, Bool, false)
  119. .ATTR(offset_x, Int, 0)
  120. .OP_END_FACTORY_REG(MatMulV2Compress)
  121. /**
  122. *@brief Performs Matrix-to-matrix Multiply, producing c=alpha[0]*a*b+beta[0]*c . \n
  123. *@attention Constraints:
  124. * For better performance, the k-axis must be aligned to 16 (input type
  125. * is float16) or 32 (input type is int8). \n
  126. *@par Inputs:
  127. *Five inputs, including:
  128. *@li a: A matrix Tensor. Must be one of the following types: float16, int8.
  129. * Has format [ND, FRACTAL_NZ]. 2D(ND) or 4D(FRACTAL_NZ).
  130. *@li b: A matrix Tensor. Must be one of the following types: float16, int8.
  131. * Has format [ND, FRACTAL_NZ, FRACTAL_Z]. 2D(ND) or 4D(FRACTAL_NZ, FRACTAL_Z).
  132. *@li c: A matrix Tensor. Must be one of the following types: float16, int32,
  133. * float32. Has format [ND, FRACTAL_NZ]. 2D(ND) or 4D(FRACTAL_NZ).
  134. *@li alpha: A 1D Tensor. The shape of alpha is [1]. Must be one of the following
  135. * types: float16, int32, float32. Has format [ND].
  136. *@li beta: A 1D Tensor. The shape of beta is [1]. Must be one of the following
  137. * types: float16, int32, float32. Has format [ND].
  138. * The format of a, b, c has restriction:\n
  139. * When type of a is int8 and type of c is int32, the format of a, b, c should
  140. * all be ND, or a is FRACTAL_NZ and b is FRACTAL_Z and c is ND.\n
  141. * When type of a is int8 and type of c is float32, the format of a, b, c should
  142. * all be ND or a is FRACTAL_NZ and b is FRACTAL_Z and c is FRACTAL_NZ.\n
  143. * When type of a is float16 and type of c is float16, the format of a, b, c
  144. * should all be ND or FRACTAL_NZ.\n
  145. * When type of a is float16 and type of c is float32, the format of a, b, c
  146. * should all be ND or FRACTAL_NZ . \n
  147. *@par Attributes:
  148. *Two attributes, including:
  149. *@li transpose_a: Optional. A bool. If True, changes the shape of "a" from
  150. * [M, K] to [K, M].
  151. *@li transpose_b: Optional. A bool. If True, changes the shape of "b" from
  152. * [K, N] to [N, K] . \n
  153. *@par Outputs:
  154. *y: The result matrix Tensor. Must be one of the following types: float16,
  155. * float32, int32. Has format [ND, FRACTAL_NZ], the format should be equal to a.
  156. * 2D(ND) or 4D(FRACTAL_NZ).
  157. */
  158. REG_OP(GEMM)
  159. .INPUT(a, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  160. .INPUT(b, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  161. .INPUT(c, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  162. .INPUT(alpha, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  163. .INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  164. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT32}))
  165. .ATTR(transpose_a, Bool, false)
  166. .ATTR(transpose_b, Bool, false)
  167. .OP_END_FACTORY_REG(GEMM)
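/* Reference semantics (a plain sketch of the math described in the brief,
 * ignoring on-device formats): with a of shape [M, K], b of shape [K, N],
 * c of shape [M, N], and scalar tensors alpha and beta of shape [1]:
 *   y[m][n] = alpha[0] * sum_k(a[m][k] * b[k][n]) + beta[0] * c[m][n]
 * transpose_a / transpose_b transpose the corresponding operand first.
 */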
  168. /**
  169. *@brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
  170. *@par Inputs:
  171. *Two inputs, including:
  172. * @li x1: A matrix Tensor. Must be one of the following types: float16,
  173. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ].
  174. * @li x2: A matrix Tensor. Must be one of the following types: float16,
  175. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n
  176. *@par Attributes:
  177. *@li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M].
  178. *@li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n
  179. *@par Outputs:
  180. *y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16,
  181. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. Has the same shape length as "x1" and "x2" . \n
  182. *@par Third-party framework compatibility
  183. * Compatible with the TensorFlow operator BatchMatmul.
  184. */
  185. REG_OP(BatchMatMul)
  186. .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  187. .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  188. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  189. .ATTR(adj_x1, Bool, false)
  190. .ATTR(adj_x2, Bool, false)
  191. .OP_END_FACTORY_REG(BatchMatMul)
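/* Shape sketch (illustrative): for x1 of shape [B, M, K] and x2 of shape
 * [B, K, N], the output y has shape [B, M, N]; adj_x1 / adj_x2 transpose the
 * last two dimensions of the corresponding input before the multiplication.
 */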
  192. /**
  193. * @brief Multiplies matrix "a" by matrix "b", producing "a * b" . \n
  194. * @par Inputs:
  195. * Three inputs, including:
  196. * @li x1: A matrix Tensor. Must be one of the following types: float16,
  197. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ].
  198. * @li x2: A matrix Tensor. Must be one of the following types: float16,
  199. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n
  200. * @li bias: A matrix Tensor. Must be one of the following types: float16,
  201. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ] . \n
  202. * @par Attributes:
  203. * @li adj_x1: A bool. If True, changes the shape of "x1" from [B, M, K] to [B, K, M].
  204. * @li adj_x2: A bool. If True, changes the shape of "x2" from [B, M, K] to [B, K, M] . \n
  205. * @par Outputs:
  206. * y: The result matrix Tensor. 2D or higher. Must be one of the following types: float16,
  207. * float32, int32. 2D or higher. Has format [ND, NHWC, FRACTAL_NZ]. Has the same shape length as "x1" and "x2" . \n
  208. * @par Third-party framework compatibility
  209. * Compatible with the TensorFlow operator BatchMatmul.
  210. */
  211. REG_OP(BatchMatMulV2)
  212. .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
  213. .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4}))
  214. .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  215. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
  216. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  217. .ATTR(adj_x1, Bool, false)
  218. .ATTR(adj_x2, Bool, false)
  219. .ATTR(offset_x, Int, 0)
  220. .OP_END_FACTORY_REG(BatchMatMulV2)
  221. /**
  222. *@brief Computes half the L2 norm of a tensor without the sqrt . \n
  223. *@par Inputs:
  224. * x: A Tensor.
  225. * TensorType::FloatingDataType() . \n
  226. *@par Outputs:
  227. *y: A Tensor. Has the same type as "x".
  228. *@par Third-party framework compatibility
  229. *Compatible with the TensorFlow operator L2Loss.
  230. */
  231. REG_OP(L2Loss)
  232. .INPUT(x, TensorType::FloatingDataType())
  233. .OUTPUT(y, TensorType::FloatingDataType())
  234. .OP_END_FACTORY_REG(L2Loss)
  235. /**
  236. *@brief: Returns a batched diagonal tensor with given batched diagonal values . \n
  237. *@par Inputs:
  238. *x: A Tensor. Must be one of the following types:
  239. * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  240. * qint8, quint8, qint32, uint16, complex128, uint32, uint64 . \n
  241. *@par Outputs:
  242. *y: A Tensor. Has the same type as "x" . \n
  243. *@par Third-party framework compatibility
  244. * Compatible with the TensorFlow operator MatrixDiag.
  245. */
  246. REG_OP(MatrixDiag)
  247. .INPUT(x, TensorType::BasicType())
  248. .OUTPUT(y, TensorType::BasicType())
  249. .OP_END_FACTORY_REG(MatrixDiag)
  250. /**
  251. *@brief: Returns a batched diagonal tensor with given batched diagonal values . \n
  252. *@par Inputs:
  253. * Two inputs, including:
  254. *@li x: A Tensor. Must be one of the following types: float16, float32, int32, int8, uint8.
  255. *@li assist: A Tensor of the same type as "x" . \n
  256. *@par Outputs:
  257. *y: A Tensor. Has the same type as "x" . \n
  258. *@par Third-party framework compatibility
  259. * Compatible with the TensorFlow operator MatrixDiag.
  260. *
  261. * @par Restrictions:
  262. * Warning: THIS FUNCTION IS DEPRECATED. Please use MatrixDiag instead.
  263. */
  264. REG_OP(MatrixDiagD)
  265. .INPUT(x, TensorType::BasicType())
  266. .INPUT(assist, TensorType::BasicType())
  267. .OUTPUT(y, TensorType::BasicType())
  268. .OP_END_FACTORY_REG(MatrixDiagD)
  269. /**
  270. *@brief: Returns the batched diagonal part of a batched tensor . \n
  271. *@par Inputs:
  272. *x: A Tensor. Must be one of the following types:
  273. * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  274. * qint8, quint8, qint32, uint16, complex128, uint32, uint64 . \n
  275. *@par Outputs:
  276. *y: A Tensor. Has the same type as "x" . \n
  277. *@par Third-party framework compatibility
  278. * Compatible with the TensorFlow operator MatrixDiagPart.
  279. */
  280. REG_OP(MatrixDiagPart)
  281. .INPUT(x, TensorType::BasicType())
  282. .OUTPUT(y, TensorType::BasicType())
  283. .OP_END_FACTORY_REG(MatrixDiagPart)
  284. /**
  285. *@brief: Returns the batched diagonal part of a batched tensor . \n
  286. *@par Inputs:
  287. * Two inputs, including:
  288. *@li x: A Tensor. Must be one of the following types: float16, float32, int32, int8, uint8.
  289. *@li assist: A Tensor of the same type as "x" . \n
  290. *@par Outputs:
  291. *y: A Tensor. Has the same type as "x" . \n
  292. *@par Third-party framework compatibility
  293. * Compatible with the TensorFlow operator MatrixDiagPart.
  294. *
  295. * @par Restrictions:
  296. * Warning: THIS FUNCTION IS DEPRECATED. Please use MatrixDiagPart instead.
  297. */
  298. REG_OP(MatrixDiagPartD)
  299. .INPUT(x, TensorType::BasicType())
  300. .INPUT(assist, TensorType::BasicType())
  301. .OUTPUT(y, TensorType::BasicType())
  302. .OP_END_FACTORY_REG(MatrixDiagPartD)
  303. /**
  304. *@brief: Returns a batched matrix tensor with new batched diagonal values . \n
  305. *@par Inputs:
  306. * Two inputs, including:
  307. *@li x: A Tensor. Must be one of the following types:
  308. * float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  309. * qint8, quint8, qint32, uint16, complex128, uint32, uint64.
  310. *@li diagonal: A Tensor of the same type as "x" . \n
  311. *@par Outputs:
  312. *y: A Tensor. Has the same type as "x" . \n
  313. *@par Third-party framework compatibility
  314. * Compatible with the TensorFlow operator MatrixSetDiag.
  315. */
  316. REG_OP(MatrixSetDiag)
  317. .INPUT(x, TensorType::BasicType())
  318. .INPUT(diagonal, TensorType::BasicType())
  319. .OUTPUT(y, TensorType::BasicType())
  320. .OP_END_FACTORY_REG(MatrixSetDiag)
  321. /**
  322. *@brief: Returns a batched matrix tensor with new batched diagonal values . \n
  323. *@par Inputs:
  324. * Three inputs, including:
  325. *@li x: A Tensor. Must be one of the following types: float16, float32, int32, int8, uint8.
  326. *@li diagonal: A Tensor of the same type as "x".
  327. *@li assist: A Tensor of the same type as "x" . \n
  328. *@par Outputs:
  329. *y: A Tensor. Has the same type as "x" . \n
  330. *@par Third-party framework compatibility
  331. * Compatible with the TensorFlow operator MatrixSetDiag.
  332. *
  333. * @par Restrictions:
  334. * Warning: THIS FUNCTION IS DEPRECATED. Please use MatrixSetDiag instead.
  335. */
  336. REG_OP(MatrixSetDiagD)
  337. .INPUT(x, TensorType::BasicType())
  338. .INPUT(diagonal, TensorType::BasicType())
  339. .INPUT(assist, TensorType::BasicType())
  340. .OUTPUT(y, TensorType::BasicType())
  341. .OP_END_FACTORY_REG(MatrixSetDiagD)
  342. /**
  343. *@brief Applies sparse "updates" to individual values or slices in a Variable . \n
  344. *@par Inputs:
  345. * Three inputs, including:
  346. *@li var: An ND Tensor.
  347. *Must be one of the following types: float16, float32, int8, uint8, double,
  348. * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
  349. * uint64
  350. *@li indices: An ND Tensor.
  351. *Must be one of the following types: int32 or int64
  352. *@li updates: An ND Tensor.
  353. *Must be one of the following types: float16, float32, int8, uint8, double,
  354. * int64, complex64, qint8, quint8, qint32, uint16, complex128, half, uint32,
  355. * uint64
  356. *@par Attributes:
  357. *use_locking: An optional bool. Defaults to "False". If "True",
  358. * the operation will be protected by a lock . \n
  359. *@par Outputs:
  360. *var: A Tensor. Has the same type and format as input "var" . \n
  361. *@par Third-party framework compatibility
  362. * Compatible with the TensorFlow operator ScatterNdUpdate.
  363. */
  364. REG_OP(ScatterNdUpdate)
  365. .INPUT(var, TensorType::BasicType())
  366. .INPUT(indices, TensorType::IndexNumberType())
  367. .INPUT(updates, TensorType::BasicType())
  368. .OUTPUT(var, TensorType::BasicType())
  369. .ATTR(use_locking, Bool, false)
  370. .OP_END_FACTORY_REG(ScatterNdUpdate)
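/* Reference semantics (a sketch following the TensorFlow scatter_nd_update
 * behavior the op is documented as compatible with): each row of "indices"
 * selects an element or slice of "var" that is overwritten by the matching
 * row of "updates". For a 1-D var:
 *   var     = [1, 2, 3, 4]
 *   indices = [[0], [2]]
 *   updates = [9, 8]
 *   result  = [9, 2, 8, 4]
 */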
  371. /**
  372. *@brief Applies sparse addition to individual values or slices in a Variable . \n
  373. *@par Inputs:
  374. * Three inputs, including:
  375. *@li x: An ND Tensor. \n
  376. *Must be one of the following types: float16, float32, bool, int8, uint8
  377. *@li indices: An ND Tensor. \n
  378. *Must be one of the following types: int32
  379. *@li updates: An ND Tensor. \n
  380. *Must be one of the following types: float16, float32, bool, int8, uint8
  381. *@par Outputs:
  382. *y: A Tensor. Has the same type and format as input "x" . \n
  383. *@par Third-party framework compatibility
  384. * Compatible with the TensorFlow operator TensorScatterUpdate.
  385. *@par Restrictions:
  386. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  387. */
  388. REG_OP(TensorScatterUpdate)
  389. .INPUT(x, TensorType::BasicType())
  390. .INPUT(indices, TensorType::IndexNumberType())
  391. .INPUT(updates, TensorType::BasicType())
  392. .OUTPUT(y, TensorType::BasicType())
  393. .OP_END_FACTORY_REG(TensorScatterUpdate)
  394. /**
  395. *@brief Uses "updates" to update tensor "data" by "indices". \n
  396. *@par Inputs:
  397. * Three inputs, including:
  398. *@li data: An ND Tensor . \n
  399. *Must be one of the following types: float16, float32, int32, int8, uint8
  400. *@li indices: An ND Tensor of type int32 or int64
  401. *@li updates: A Tensor. Same shape as "indices". Format: NCHW, NHWC . \n
  402. *Must be one of the following types: float16, float32, int32, int8, uint8
  403. *@par Attributes:
  404. *@li axis: An optional attribute. Defaults to 0.
  405. *@par Outputs:
  406. *y: A Tensor. Has the same type and format as input "data" . \n
  407. *@par Third-party framework compatibility
  408. * Compatible with the ONNX operator ScatterElements.
  409. */
  410. REG_OP(ScatterElements)
  411. .INPUT(data, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  412. .INPUT(indices, TensorType::IndexNumberType())
  413. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  414. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  415. .ATTR(axis, Int, 0)
  416. .OP_END_FACTORY_REG(ScatterElements)
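/* Reference semantics (a sketch following the ONNX ScatterElements definition
 * the op is documented as compatible with), shown for a 2-D "data":
 *   axis == 0:  y = data;  y[indices[i][j]][j] = updates[i][j]
 *   axis == 1:  y = data;  y[i][indices[i][j]] = updates[i][j]
 */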
  417. /**
  418. *@brief Adds sparse "updates" to a variable reference . \n
  419. *@par Inputs:
  420. * Three inputs, including:
  421. *@li var: An ND Tensor .
  422. *Must be one of the following types: float16, float32, int32, int8, uint8
  423. *@li indices: An ND Tensor of type int32 or int64
  424. *@li updates: A Tensor. Format: NCHW, NHWC .
  425. *Must be one of the following types: float16, float32, int32, int8, uint8
  426. *@par Attributes:
  427. * use_locking: An optional bool. Defaults to "False". If "True", the operation
  428. * will be protected by a lock . \n
  429. *@par Outputs:
  430. *var: A Tensor. Has the same type and format as input "var" . \n
  431. *@par Third-party framework compatibility
  432. * Compatible with the TensorFlow operator ScatterAdd.
  433. */
  434. REG_OP(ScatterAdd)
  435. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  436. .INPUT(indices, TensorType::IndexNumberType())
  437. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  438. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  439. .ATTR(use_locking, Bool, false)
  440. .OP_END_FACTORY_REG(ScatterAdd)
  441. /**
  442. *@brief Adds sparse "updates" to a variable reference . \n
  443. *@par Inputs:
  444. * Three inputs, including:
  445. *@li var: An ND Tensor .
  446. *Must be one of the following types: float16, float32, int32, int8, uint8
  447. *@li indices: An ND Tensor of type int32 or int64
  448. *@li updates: An ND Tensor .
  449. *Must be one of the following types: float16, float32, int32, int8, uint8
  450. *@par Attributes:
  451. * axis: A required int. The axis along which to index. \n
  452. *@par Outputs:
  453. *var: A Tensor. Has the same type and format as input "var" . \n
  454. *@par Third-party framework compatibility
  455. * Compatible with the pytorch operator ScatterAdd.
  456. */
  457. REG_OP(ScatterAddWithAxis)
  458. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  459. .INPUT(indices, TensorType::IndexNumberType())
  460. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  461. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  462. .REQUIRED_ATTR(axis, Int)
  463. .OP_END_FACTORY_REG(ScatterAddWithAxis)
  464. /**
  465. *@brief Divides a variable reference by sparse updates . \n
  466. *@par Inputs:
  467. * Three inputs, including:
  468. *@li var: An ND Tensor.
  469. *Must be one of the following types: float16, float, int32, int8, uint8
  470. *@li indices: An ND Tensor.
  471. *Must be one of the following types: int32 or int64
  472. *@li updates: An ND Tensor.
  473. *Must be one of the following types: float16, float, int32, int8, uint8
  474. *@par Attributes:
  475. *use_locking: An optional bool. Defaults to "False". If "True",
  476. * the operation will be protected by a lock . \n
  477. *@par Outputs:
  478. *var: A Tensor. Has the same type and format as input "var" . \n
  479. *@par Third-party framework compatibility
  480. * Compatible with the TensorFlow operator ScatterDiv.
  481. */
  482. REG_OP(ScatterDiv)
  483. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  484. .INPUT(indices, TensorType::IndexNumberType())
  485. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  486. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  487. .ATTR(use_locking, Bool, false)
  488. .OP_END_FACTORY_REG(ScatterDiv)
  489. /**
  490. *@brief Applies sparse addition to individual values or slices in a Variable . \n
  491. *@par Inputs:
  492. * Three inputs, including:
  493. *@li var: An ND Tensor.
  494. *Must be one of the following types: float16, float, int32, int8, uint8
  495. *@li indices: An ND Tensor.
  496. *Must be one of the following types: int32 or int64
  497. *@li updates: An ND Tensor.
  498. *Must be one of the following types: float16, float, int32, int8, uint8
  499. *@par Attributes:
  500. *use_locking: An optional bool. Defaults to "False". If "True",
  501. * the operation will be protected by a lock . \n
  502. *@par Outputs:
  503. *var: A Tensor. Has the same type and format as input "var" . \n
  504. *@par Third-party framework compatibility
  505. * Compatible with the TensorFlow operator ScatterNdAdd.
  506. */
  507. REG_OP(ScatterNdAdd)
  508. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  509. .INPUT(indices, TensorType::IndexNumberType())
  510. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  511. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  512. .ATTR(use_locking, Bool, false)
  513. .OP_END_FACTORY_REG(ScatterNdAdd)
  514. /**
  515. *@brief Applies sparse addition to individual values or slices in a Variable . \n
  516. *@par Inputs:
  517. * Three inputs, including:
  518. *@li x: An ND Tensor. \n
  519. *Must be one of the following types: float16, float32, int32, int8, uint8
  520. *@li indices: An ND Tensor. \n
  521. *Must be one of the following types: int32
  522. *@li updates: An ND Tensor. \n
  523. * Must be one of the following types: float16, float32, int32, int8, uint8
  524. *@par Outputs:
  525. *y: A Tensor. Has the same type and format as input "x" . \n
  526. *@par Third-party framework compatibility
  527. * Compatible with the TensorFlow operator TensorScatterAdd.
  528. *@par Restrictions:
  529. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  530. */
  531. REG_OP(TensorScatterAdd)
  532. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  533. .INPUT(indices, TensorType::IndexNumberType())
  534. .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  535. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  536. .OP_END_FACTORY_REG(TensorScatterAdd)
  537. /**
  538. *@brief Applies sparse subtraction to individual values or slices in a Variable . \n
  539. *@par Inputs:
  540. * Three inputs, including:
  541. *@li var: An ND Tensor.
  542. *Must be one of the following types: float16, float, int32, int8, uint8
  543. *@li indices: An ND Tensor.
  544. *Must be one of the following types: int32 or int64
  545. *@li updates: An ND Tensor.
  546. *Must be one of the following types: float16, float, int32, int8, uint8
  547. *@par Attributes:
  548. *use_locking: An optional bool. Defaults to "False". If "True",
  549. * the operation will be protected by a lock . \n
  550. *@par Outputs:
  551. * var: A Tensor. Has the same type and format as input "var" . \n
  552. *@par Third-party framework compatibility
  553. * Compatible with the TensorFlow operator ScatterNdSub.
  554. */
  555. REG_OP(ScatterNdSub)
  556. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  557. .INPUT(indices, TensorType::IndexNumberType())
  558. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  559. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  560. .ATTR(use_locking, Bool, false)
  561. .OP_END_FACTORY_REG(ScatterNdSub)
  562. /**
  563. *@brief Applies sparse addition to individual values or slices in a Variable . \n
  564. *@par Inputs:
  565. * Three inputs, including:
  566. *@li x: An ND Tensor. \n
  567. *Must be one of the following types: float16, float32, int32, int8, uint8
  568. *@li indices: An ND Tensor. \n
  569. *Must be one of the following types: int32
  570. *@li updates: An ND Tensor. \n
  571. *Must be one of the following types: float16, float32, int32, int8, uint8
  572. *@par Outputs:
  573. * y: A Tensor. Has the same type and format as input "x" . \n
  574. *@par Third-party framework compatibility
  575. * Compatible with the TensorFlow operator TensorScatterSub.
  576. *@par Restrictions:
  577. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  578. */
  579. REG_OP(TensorScatterSub)
  580. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  581. .INPUT(indices, TensorType::IndexNumberType())
  582. .INPUT(updates, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  583. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  584. .OP_END_FACTORY_REG(TensorScatterSub)
  585. /**
  586. *@brief Subtracts sparse updates from a variable reference . \n
  587. *@par Inputs:
  588. * Three inputs, including:
  589. *@li var: An ND Tensor.
  590. *Must be one of the following types: float16, float, int32, int8, uint8
  591. *@li indices: An ND Tensor.
  592. *Must be one of the following types: int32 or int64
  593. *@li updates: An ND Tensor.
  594. *Must be one of the following types: float16, float, int32, int8, uint8
  595. *@par Attributes:
  596. *use_locking: An optional bool. Defaults to "False". If "True",
  597. * the operation will be protected by a lock . \n
  598. *@par Outputs:
  599. * var: A Tensor. Has the same type and format as input "var" . \n
  600. *@par Third-party framework compatibility
  601. * Compatible with the TensorFlow operator ScatterSub.
  602. */
  603. REG_OP(ScatterSub)
  604. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  605. .INPUT(indices, TensorType::IndexNumberType())
  606. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  607. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  608. .ATTR(use_locking, Bool, false)
  609. .OP_END_FACTORY_REG(ScatterSub)
  610. /**
  611. *@brief: Returns the batched diagonal part of a batched tensor with "assist" . \n
  612. *@par Inputs:
  613. * Two inputs, including:
  614. * @li x: A Tensor of type float16, float32, or int32.
  615. * @li assist: A Tensor of the same type as "x" . \n
  616. *@par Outputs:
  617. *y: A Tensor. Has the same type as "x" . \n
  618. *@par Third-party framework compatibility
  619. * Compatible with the TensorFlow operator DiagPart.
  620. *
  621. * @par Restrictions:
  622. * Warning: THIS FUNCTION IS DEPRECATED. Please use DiagPart instead.
  623. */
  624. REG_OP(DiagPartD)
  625. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  626. .INPUT(assist, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  627. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
  628. .OP_END_FACTORY_REG(DiagPartD)
  629. /**
  630. *@brief: Returns the batched diagonal part of a batched tensor . \n
  631. *@par Inputs:
  632. *x: A Tensor. Must be one of the following types:
  633. * float16, float32, int32, int64, double, complex64, complex128 . \n
  634. *@par Outputs:
  635. *y: A Tensor. Has the same type as "x" . \n
  636. *@par Third-party framework compatibility
  637. * Compatible with the TensorFlow operator DiagPart.
  638. */
  639. REG_OP(DiagPart)
  640. .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT64, DT_DOUBLE,
  641. DT_COMPLEX64, DT_COMPLEX128}))
  642. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT64, DT_DOUBLE,
  643. DT_COMPLEX64, DT_COMPLEX128}))
  644. .OP_END_FACTORY_REG(DiagPart)
  645. /**
  646. *@brief Also known as a "fully-connected" layer, computes an inner product with a set of learned weights, and (optionally) adds biases . \n
  647. *@par Inputs:
  648. * Four inputs, including:
  649. *@li x: A Tensor of type float16, int8.
  650. *@li w: A weight matrix of type float16, int8.
  651. *@li b: A Tensor of type float16, int32, float32.
  652. *@li offset_w: A Tensor of type int8 . \n
  653. *@par Attributes:
  654. *@li num_output: Reserved.
  655. *@li transpose: A bool, specifying whether to transpose the input w, either "true" or "false". Defaults to "false".
  656. *@li axis: Optional. An int, 1 or 2, specifying which dimension the input "K" starts from. Defaults to 1.
  657. * The product of the subsequent dimensions starting from the first or the second dimension is "K".
  658. *@li offset_x: An optional integer for quantized FullyConnection.
  659. *The negative offset added to the input image for int8 type. Ensure offset_x within the
  660. *effective range of int8 [-128, 127]. Defaults to "0". \n
  661. *@par Outputs:
  662. *y: The result tensor of type float16, int32, float32 . \n
  663. *@par Third-party framework compatibility
  664. * Compatible with the Caffe operator InnerProduct . \n
  665. *@par Quantization supported or not
  666. * Yes
  667. */
  668. REG_OP(FullyConnection)
  669. .INPUT(x, TensorType({DT_FLOAT16, DT_INT8, DT_INT4}))
  670. .INPUT(w, TensorType({DT_FLOAT16, DT_INT8, DT_INT4}))
  671. .OPTIONAL_INPUT(b, TensorType({DT_FLOAT16, DT_INT32,DT_FLOAT32}))
  672. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
  673. .OUTPUT(y, TensorType({DT_FLOAT16, DT_INT32,DT_FLOAT32}))
  674. .REQUIRED_ATTR(num_output, Int)
  675. .ATTR(transpose, Bool, false)
  676. .ATTR(axis, Int, 1)
  677. .ATTR(offset_x, Int, 0)
  678. .OP_END_FACTORY_REG(FullyConnection)
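/* Shape sketch (assumptions follow the Caffe InnerProduct convention named
 * above; exact on-device layouts such as FRACTAL_Z for w are handled by the
 * framework): with axis == 1, an input x of shape [N, C, H, W] is treated as
 * [N, K] with K = C*H*W, the weight w as [num_output, K], and the output y as
 * [N, num_output], with bias b broadcast over N when supplied.
 */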
  679. /**
  680. *@brief Also known as a "fully-connected-compress" layer, computes an inner
  681. product with a set of learned weights, and (optionally) adds biases . \n
  682. *@par Inputs:
  683. * Five inputs, including:
  684. *@li x: A Tensor of type uint8, int8.
  685. *@li w: A weight matrix of type int8.
  686. *@li compress_index: A compress index matrix of type int8.
  687. *@li b: A Tensor of type int32.
  688. *@li offset_w: A Tensor of type int8.
  689. *@par Attributes:
  690. *@li num_output: An int, specifying the number of outputs.
  691. *@li transpose: A bool, specifying whether to transpose input w, either "true"
  692. or "false". Defaults to "false".
  693. *@li axis: Optional. An int, 1 or 2, specifying which dimension the input "K"
  694. starts from. Defaults to "1".
  695. * The product of the subsequent dimensions starting from the first dimension or the
  696. second dimension is "K".
  697. *@li offset_x: An optional integer for quantized FullyConnectionCompress.
  698. *The negative offset added to the input image for int8 type. Ensure offset_x
  699. within the effective range of int8 [-128, 127]. Defaults to "0". \n
  700. *@par Outputs:
  701. *y: The result tensor of type int32. \n
  702. *@par Third-party framework compatibility
  703. * Compatible with the Caffe operator InnerProduct. \n
  704. *@par Quantization supported or not
  705. * Yes
  706. */
  707. REG_OP(FullyConnectionCompress)
  708. .INPUT(x, TensorType({DT_UINT8, DT_INT8}))
  709. .INPUT(w, TensorType({DT_INT8}))
  710. .INPUT(compress_index, TensorType({DT_INT8}))
  711. .OPTIONAL_INPUT(b, TensorType({DT_INT32}))
  712. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8}))
  713. .OUTPUT(y, TensorType({DT_INT32}))
  714. .REQUIRED_ATTR(num_output, Int)
  715. .ATTR(transpose, Bool, false)
  716. .ATTR(axis, Int, 1)
  717. .ATTR(offset_x, Int, 0)
  718. .OP_END_FACTORY_REG(FullyConnectionCompress)
  719. /**
  720. *@brief Computes the confusion matrix from predictions and labels . \n
  721. *@par Inputs:
  722. * Three inputs, including:
  723. *@li labels: A Tensor. Must be one of the following types: float16, float32,
  724. * int32, int8, uint8.
  725. *@li predictions: A Tensor. Must be one of the following types: float16,
  726. * float32, int32, int8, uint8.
  727. *@li weights: A Tensor. Must be one of the following types: float16, float32,
  728. * int32, int8, uint8 . \n
  729. *@par Attributes:
  730. *@li num_classes: An integer for the shape of the output matrix.
  731. * No default value.
  732. *@li dtype: Data type of the confusion matrix. No default value . \n
  733. *@par Outputs:
  734. *y: A Tensor. Has the same type and format as input "labels"
  735. *@attention Constraints:
  736. *@li "weights", "labels", and "predictions" are 1D tensors.
  737. *@li The output is with shape (num_classes, num_classes),
  738. * where, 1 <= num_classes <= 4096 . \n
  739. *@see Region()
  740. *@par Third-party framework compatibility
  741. * Compatible with the TensorFlow operator ConfusionMatrix.
  742. */
  743. REG_OP(ConfusionMatrix)
  744. .INPUT(labels, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16, DT_INT8, DT_UINT8}))
  745. .INPUT(predictions, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16, DT_INT8, DT_UINT8}))
  746. .OPTIONAL_INPUT(weights, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16, DT_INT8, DT_UINT8}))
  747. .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16, DT_INT8, DT_UINT8}))
  748. .REQUIRED_ATTR(num_classes, Int)
  749. .REQUIRED_ATTR(dtype, String)
  750. .OP_END_FACTORY_REG(ConfusionMatrix)
  751. /**
  752. *@brief Multiplies sparse updates into a variable reference . \n
  753. *@par Inputs:
  754. * Three inputs, including:
  755. *@li var: An ND Tensor.
  756. *Must be one of the following types: float16, float, int32, int8, uint8
  757. *@li indices: An ND Tensor.
  758. *Must be one of the following types: int32 or int64
  759. *@li updates: An ND Tensor . \n
  760. *Must be one of the following types: float16, float, int32, int8, uint8
  761. *@par Attributes:
  762. *use_locking: An optional bool. Defaults to "False". If "True", the operation
  763. * will be protected by a lock . \n
  764. *@par Outputs:
  765. *var: A Tensor. Has the same type and format as input "var" . \n
  766. *@par Third-party framework compatibility
  767. * Compatible with the TensorFlow operator ScatterMul.
  768. */
  769. REG_OP(ScatterMul)
  770. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  771. .INPUT(indices, TensorType::IndexNumberType())
  772. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  773. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  774. .ATTR(use_locking, Bool, false)
  775. .OP_END_FACTORY_REG(ScatterMul)
  776. /**
  777. *@brief Reduces sparse updates into a variable reference using
  778. * the "min" operation . \n
  779. *@par Inputs:
  780. * Three inputs, including:
  781. *@li var: An ND Tensor.
  782. *Must be one of the following types: float16, float, int32, int8, uint8
  783. *@li indices: An ND Tensor.
  784. *Must be one of the following types: int32 or int64
  785. *@li updates: An ND Tensor.
  786. *Must be one of the following types: float16, float, int32, int8, uint8
  787. *@par Attributes:
  788. *use_locking: An optional bool. Defaults to "False". If "True", the operation
  789. * will be protected by a lock . \n
  790. *@par Outputs:
  791. *var: A Tensor. Has the same type and format as input "var" . \n
  792. *@par Third-party framework compatibility
  793. * Compatible with the TensorFlow operator ScatterMin.
  794. */
  795. REG_OP(ScatterMin)
  796. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  797. .INPUT(indices, TensorType::IndexNumberType())
  798. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  799. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  800. .ATTR(use_locking, Bool, false)
  801. .OP_END_FACTORY_REG(ScatterMin)
  802. /**
  803. *@brief Reduces sparse updates into a variable reference using the "max" operation . \n
  804. *@par Inputs:
  805. * Three inputs, including:
  806. *@li var: An ND Tensor .
  807. *Must be one of the following types: float16, float, int32, int8, uint8
  808. *@li indices: An NCHW, NHWC, or ND Tensor . \n
  809. *Must be one of the following types: int32 or int64
  810. *@li updates: An NCHW, NHWC, or ND Tensor .
  811. *Must be one of the following types: float16, float, int32, int8, uint8
  812. *@par Attributes:
  813. *use_locking: An optional bool. Defaults to "False".
  814. * If "True", the operation will be protected by a lock . \n
  815. *@par Outputs:
  816. *var: A Tensor. Has the same type and format as input "var" . \n
  817. *@par Third-party framework compatibility
  818. * Compatible with the TensorFlow operator ScatterMax.
  819. */
  820. REG_OP(ScatterMax)
  821. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  822. .INPUT(indices, TensorType::IndexNumberType())
  823. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  824. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  825. .ATTR(use_locking, Bool, false)
  826. .OP_END_FACTORY_REG(ScatterMax)
  827. /**
  828. *@brief Applies sparse updates to a variable reference . \n
  829. *@par Inputs:
  830. * Three inputs, including:
  831. *@li var: An ND Tensor .
  832. *Must be one of the following types: float16, float, int32, int8, uint8
  833. *@li indices: An ND Tensor . \n
  834. *Must be one of the following types: int32 or int64
  835. *@li updates: An ND Tensor .
  836. *Must be one of the following types: float16, float, int32, int8, uint8
  837. *@par Attributes:
  838. *use_locking: An optional bool. Defaults to "False". If "True",
  839. * the operation will be protected by a lock . \n
  840. *@par Outputs:
  841. *var: A Tensor. Has the same type and format as input "var" . \n
  842. *@par Third-party framework compatibility
  843. * Compatible with the TensorFlow operator ScatterUpdate.
  844. */
  845. REG_OP(ScatterUpdate)
  846. .INPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  847. .INPUT(indices, TensorType::IndexNumberType())
  848. .INPUT(updates, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  849. .OUTPUT(var, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  850. .ATTR(use_locking, Bool, false)
  851. .OP_END_FACTORY_REG(ScatterUpdate)
  852. /**
  853. *@brief Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input` . \n
  854. *@par Inputs:
  855. * Three inputs, including:
  856. *@li input: Rank `r` tensor where `r >= 2`. \n
  857. *@li k: \n
  858. *Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main \n
  859. *diagonal, and negative value means subdiagonals. `k` can be a single integer \n
  860. *(for a single diagonal) or a pair of integers specifying the low and high ends \n
  861. *of a matrix band. `k[0]` must not be larger than `k[1]`. \n
  862. *@li padding_value: The value to fill the area outside the specified diagonal band with. \n
  863. *@par Outputs:
  864. *diagonal: The extracted diagonal(s) . \n
  865. *@par Third-party framework compatibility
  866. * Compatible with the TensorFlow operator MatrixDiagPartV2.
  867. */
  868. REG_OP(MatrixDiagPartV2)
  869. .INPUT(input, TensorType::BasicType())
  870. .INPUT(k, TensorType({DT_INT32}))
  871. .INPUT(padding_value, TensorType::BasicType())
  872. .OUTPUT(diagonal, TensorType::BasicType())
  873. .OP_END_FACTORY_REG(MatrixDiagPartV2)
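/* Example (illustrative): for a 3x3 input
 *   [[1, 2, 3],
 *    [4, 5, 6],
 *    [7, 8, 9]]
 * k = 0 extracts the main diagonal [1, 5, 9]; k = [0, 1] extracts the main
 * diagonal and the first superdiagonal, with the shorter diagonal padded using
 * "padding_value" so that all extracted diagonals have the same length.
 */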
  874. /**
  875. *@brief Returns a batched matrix tensor with new batched diagonal values . \n
  876. *@par Inputs:
  877. * Three inputs, including:
  878. *@li input: Rank `r+1`, where `r >= 1`. \n
  879. *@li diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. \n
  880. *@li k:
  881. *Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main \n
  882. *diagonal, and negative value means subdiagonals. `k` can be a single integer \n
  883. *(for a single diagonal) or a pair of integers specifying the low and high ends \n
  884. *of a matrix band. `k[0]` must not be larger than `k[1]`. \n
  885. *@par Outputs:
  886. *output: Rank `r+1`, with `output.shape = input.shape` . \n
  887. *@par Third-party framework compatibility
  888. * Compatible with the TensorFlow operator MatrixSetDiagV2.
  889. */
  890. REG_OP(MatrixSetDiagV2)
  891. .INPUT(input, TensorType::BasicType())
  892. .INPUT(diagonal, TensorType::BasicType())
  893. .INPUT(k, TensorType({DT_INT32}))
  894. .OUTPUT(output, TensorType::BasicType())
  895. .OP_END_FACTORY_REG(MatrixSetDiagV2)
  896. /**
  897. *@brief Returns a batched matrix tensor with new batched diagonal values . \n
  898. *@par Inputs:
  899. * Three inputs, including:
  900. *@li input: Rank `r+1`, where `r >= 1`. \n
  901. *@li diagonal: Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. \n
  902. *@li k:
  903. *Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main \n
  904. *diagonal, and negative value means subdiagonals. `k` can be a single integer \n
  905. *(for a single diagonal) or a pair of integers specifying the low and high ends \n
  906. *of a matrix band. `k[0]` must not be larger than `k[1]`. \n
  907. *@par Attributes:
  908. *@li align: An optional string specifying how superdiagonals and subdiagonals \n
  909. *should be aligned. Defaults to RIGHT_LEFT. \n
  910. *Other options: LEFT_RIGHT, LEFT_LEFT, and RIGHT_RIGHT.\n
  911. *@par Outputs:
  912. *output: Rank `r+1`, with `output.shape = input.shape` . \n
  913. *@par Third-party framework compatibility
  914. * Compatible with the TensorFlow operator MatrixSetDiagV3.
  915. */
  916. REG_OP(MatrixSetDiagV3)
  917. .INPUT(input, TensorType::BasicType())
  918. .INPUT(diagonal, TensorType::BasicType())
  919. .INPUT(k, TensorType({DT_INT32}))
  920. .OUTPUT(output, TensorType::BasicType())
  921. .ATTR(align, String, "RIGHT_LEFT")
  922. .OP_END_FACTORY_REG(MatrixSetDiagV3)
  923. /**
  924. *@brief Returns a batched diagonal tensor with given batched diagonal values . \n
  925. *@par Inputs:
  926. * Five inputs, including:
  927. *@li diagonal: Rank `r`, where `r >= 1` \n
  928. *@li k:
  929. *Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main \n
  930. *diagonal, and negative value means subdiagonals. `k` can be a single integer \n
  931. *(for a single diagonal) or a pair of integers specifying the low and high ends \n
  932. *of a matrix band. `k[0]` must not be larger than `k[1]`. \n
  933. *@li num_rows:
  934. *The number of rows of the output matrix. If it is not provided, the op assumes \n
  935. *the output matrix is a square matrix and infers the matrix size from k and the \n
  936. *innermost dimension of `diagonal`. \n
  937. *@li num_cols: An NCHW, NHWC, or ND Tensor.
  938. *The number of columns of the output matrix. If it is not provided, the op \n
  939. *assumes the output matrix is a square matrix and infers the matrix size from \n
  940. *k and the innermost dimension of `diagonal`. \n
  941. *@li padding_value: The number to fill the area outside the specified diagonal band with. \n
  942. *@par Outputs:
  943. *output: Has rank `r+1` when `k` is an integer or `k[0] == k[1]`, rank `r` otherwise . \n
  944. *@par Third-party framework compatibility
  945. * Compatible with the TensorFlow operator MatrixDiagV2.
  946. */
  947. REG_OP(MatrixDiagV2)
  948. .INPUT(diagonal, TensorType::BasicType())
  949. .INPUT(k, TensorType({DT_INT32}))
  950. .INPUT(num_rows, TensorType({DT_INT32}))
  951. .INPUT(num_cols, TensorType({DT_INT32}))
  952. .INPUT(padding_value, TensorType::BasicType())
  953. .OUTPUT(output, TensorType::BasicType())
  954. .OP_END_FACTORY_REG(MatrixDiagV2)
  955. /**
  956. * @brief Adds "updates" to "var_out" according to "axis" and "indices".
  957. * @par Inputs:
  958. * Three inputs, including:
  959. * @li var: A Tensor. Must be one of the following types:
  960. * float16, float32, int32, int8, uint8.
  961. * @li indices: A Tensor of the indices, type should be int32.
  962. * @li updates: A Tensor of the same type as "var".
  963. * @par Attributes:
  964. * @li axis: A required int specifying the axis along which to perform the indexed add.
  965. * @par Outputs:
  966. * @li var_out: A Tensor. Same as input "var".
  967. * @par Third-party framework compatibility
  968. * Compatible with the Pytorch operator index_add.
  969. * @par Restrictions:
  970. * Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  971. */
  972. REG_OP(IndexAdd)
  973. .INPUT(var, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  974. .INPUT(indices, TensorType({DT_INT32}))
  975. .INPUT(updates, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  976. .OUTPUT(var_out, TensorType({DT_INT32, DT_INT8, DT_UINT8, DT_FLOAT32, DT_FLOAT16}))
  977. .ATTR(axis, Int, 0)
  978. .OP_END_FACTORY_REG(IndexAdd)
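/* Reference semantics (a sketch following the PyTorch index_add operator named
 * above), shown for axis == 0 on a 2-D "var":
 *   var_out = var;
 *   var_out[indices[i]][j] += updates[i][j];   // for every valid i, j
 * For other axis values the indexed dimension moves accordingly.
 */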
  979. /**
  980. * @brief Replaces the values of "x1" at the positions given by
  981. * "indices" with the corresponding values of "x2".
  982. * @par Inputs:
  983. * Two inputs, including:
  984. * @li x1: A Tensor. Must be one of the following types:
  985. *float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  986. *qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n
  987. * @li x2: A Tensor of the same type as "x1". \n
  988. * @par Attributes:
  989. * @li indices: A required ListInt. The indices at which to put the values.
  990. * @li accumulate: An optional int. If 1, the values are accumulated (added) rather than replaced. Defaults to 0.
  991. * @par Outputs:
  992. * @li y: A Tensor. Same as input "x1".
  993. * @par Third-party framework compatibility
  994. * Compatible with the Pytorch operator index_put.
  995. * @par Restrictions:
  996. * Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  997. */
  998. REG_OP(IndexPut)
  999. .INPUT(x1, TensorType::BasicType())
  1000. .INPUT(x2, TensorType::BasicType())
  1001. .OUTPUT(y, TensorType::BasicType())
  1002. .REQUIRED_ATTR(indices, ListInt)
  1003. .ATTR(accumulate, Int, 0)
  1004. .OP_END_FACTORY_REG(IndexPut)
  1005. /**
  1006. *@brief: Returns the upper triangular part of a matrix (2-D tensor) or a batch of matrices "x". \n
  1007. *@par Inputs:
  1008. *x: A Tensor. Must be one of the following types:
  1009. *float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  1010. *qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n
  1011. *@par Attributes:
  1012. *diagonal: An optional int indicating the diagonal to consider. Defaults to 0. \n
  1013. *@par Outputs:
  1014. *y: A Tensor. Has the same type as "x" . \n
  1015. *@par Third-party framework compatibility
  1016. * Compatible with the Pytorch operator Triu.
  1017. */
  1018. REG_OP(Triu)
  1019. .INPUT(x, TensorType::BasicType())
  1020. .ATTR(diagonal, Int, 0)
  1021. .OUTPUT(y, TensorType::BasicType())
  1022. .OP_END_FACTORY_REG(Triu)
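/* Example (illustrative): for a 3x3 input and diagonal == 0, Triu keeps the
 * elements on and above the main diagonal and zeros the rest:
 *   [[1, 2, 3],        [[1, 2, 3],
 *    [4, 5, 6],   ->    [0, 5, 6],
 *    [7, 8, 9]]         [0, 0, 9]]
 * A positive "diagonal" keeps only elements that many diagonals above the
 * main one; a negative value also keeps that many diagonals below it.
 */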
  1023. /**
  1024. *@brief: Returns the lower triangular part of a matrix (2-D tensor) or a batch of matrices "x". \n
  1025. *@par Inputs:
  1026. *x: A Tensor. Must be one of the following types:
  1027. *float16, float32, double, int32, uint8, int16, int8, complex64, int64,
  1028. *qint8, quint8, qint32, uint16, complex128, uint32, uint64. \n
  1029. *@par Attributes:
  1030. *diagonal: An optional int indicating the diagonal to consider. Defaults to 0. \n
  1031. *@par Outputs:
  1032. *y: A Tensor. Has the same type as "x" . \n
  1033. *@par Third-party framework compatibility
  1034. * Compatible with the Pytorch operator Tril.
  1035. */
  1036. REG_OP(Tril)
  1037. .INPUT(x, TensorType::BasicType())
  1038. .ATTR(diagonal, Int, 0)
  1039. .OUTPUT(y, TensorType::BasicType())
  1040. .OP_END_FACTORY_REG(Tril)
  1041. /**
  1042. *@brief Sums the product of the elements of the input operands along dimensions
  1043. * specified using a notation based on the Einstein summation convention.
  1044. *@par Inputs:
  1045. * One dynamic input, including:
  1046. * @li x: A list of Tensors, the operands of the summation. It's a dynamic input.
  1047. * Must be one of the following types: int32, float16, float32.
  1048. * All operands must have the same type, and their shapes must be consistent
  1049. * with the subscripts in "equation". The number of operands must be equal
  1050. * to the attribute "N" . \n
  1051. *@par Attributes:
  1052. *@li equation: A string. The subscripts for the Einstein summation. \n
  1053. *@li N: An int. The number of input tensors. \n
  1054. *@par Outputs:
  1055. *@li y: Sums the product of the elements of the input operands along dimensions specified
  1056. using a notation based on the Einstein summation convention. \n
  1057. *@attention Constraints:
  1058. *Input N must be Int. \n
  1059. *@par Third-party framework compatibility
  1060. *Compatible with Pytorch einsum operator.
  1061. */
  1062. REG_OP(Einsum)
  1063. .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
  1064. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
  1065. .REQUIRED_ATTR(equation, String)
  1066. .REQUIRED_ATTR(N, Int)
  1067. .OP_END_FACTORY_REG(Einsum)
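/* Example equations (illustrative): "ij,jk->ik" expresses a matrix
 * multiplication of two operands, "ij->ji" a transpose, and "ij,ij->" the sum
 * of an elementwise product. N must match the number of operands, e.g. N = 2
 * for "ij,jk->ik".
 */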
  1068. /**
  1069. *@brief Returns a 2-D tensor with ones on the diagonal and zeros elsewhere. \n
  1070. *@par Inputs:
  1071. *No inputs
  1072. *@par Attributes:
  1073. *@li num_rows: A required int. \n
  1074. *@li num_columns: An optional int. Defaults to 0. \n
  1075. *@li batch_shape: An optional ListInt. Defaults to []. \n
  1076. *@li dtype: An optional int. Defaults to 0. \n
  1077. *@par Outputs:
  1078. *y: A Tensor with targeted type and shape. \n
  1079. *@par Third-party framework compatibility
  1080. *Compatible with the Pytorch operator Eye. \n
  1081. */
  1082. REG_OP(Eye)
  1083. .OUTPUT(y, TensorType::BasicType()) /* "Result, has targeted element type" */
  1084. .REQUIRED_ATTR(num_rows, Int)
  1085. .ATTR(num_columns, Int, 0)
  1086. .ATTR(batch_shape, ListInt, {})
  1087. .ATTR(dtype, Int, 0)
  1088. .OP_END_FACTORY_REG(Eye)
  1089. /**
  1090. *@brief: Fills the diagonal of a tensor that has at least 2 dimensions with "fill_value" . \n
  1091. *@par Inputs:
  1092. *x: A Tensor. Must be one of the following types:
  1093. * float32, int32, int64 . \n
  1094. *@par Outputs:
  1095. *y: A Tensor. Has the same type as "x" . \n
  1096. *@par Attributes:
  1097. *@li fill_value: A required float. The value to fill in.
  1098. *@li wrap: An optional bool. Defaults to "False". If "True", use recursive fill. \n
  1099. *@par Third-party framework compatibility
  1100. * Compatible with the Pytorch operator FillDiagonal.
  1101. */
  1102. REG_OP(FillDiagonal)
  1103. .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_INT64}))
  1104. .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_INT64}))
  1105. .REQUIRED_ATTR(fill_value, Float)
  1106. .ATTR(wrap, Bool, false)
  1107. .OP_END_FACTORY_REG(FillDiagonal)
  1108. } // namespace ge
  1109. #endif // OPS_BUILT_IN_OP_PROTO_INC_MATRIX_CALCULATION_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally produces a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, the GE API and GE Core; the detailed architecture diagram is shown below.