
sparse_ops.h

/**
* Copyright 2019 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*!
* \file sparse_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_
#include "graph/operator_reg.h"
namespace ge {
/**
*@brief Applies softmax to a batched ND SparseTensor . \n
*@par Inputs:
*The input must be a batched ND SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: A vector Tensor of type float or double. 1D. The values of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor . \n
*@par Outputs:
*y: A vector Tensor. 1D. Has the same type as "values" . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SparseSoftmax.
*/
REG_OP(SparseSoftmax)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSoftmax)
/**
*@brief Adds up a SparseTensor and a dense Tensor, producing a dense Tensor . \n
*@par Inputs:
*Inputs "x1_*" must be SparseTensors and "x2" must be a dense Tensor.
* @li x1_indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor.
* @li x2: A matrix Tensor. Has the same type and same shape as the SparseTensors . \n
*@par Outputs:
*y: A matrix Tensor. Has the same type and same shape as "x2" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseAdd.
*/
REG_OP(SparseTensorDenseAdd)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseTensorDenseAdd)
/**
*@brief Reorders a SparseTensor into the canonical, row-major ordering . \n
*@par Inputs:
* @li indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
* @li values: Values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor . \n
*@par Outputs:
*@li y_indices: The indices of the SparseTensor. Has the same type as "indices".
*@li y_values: The values of the SparseTensor. Has the same type as "values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReorder.
*/
REG_OP(SparseReorder)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseReorder)
/**
*@brief Reshapes a SparseTensor to represent values in a new dense shape . \n
*@par Inputs:
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
* @li new_shape: A 1D Tensor of type int64. The requested new dense shape . \n
*@par Outputs:
*@li y_indices: A Tensor of type int64. The indices of the new dense shape.
*@li y_shape: A Tensor of type int64. The shape of the new dense shape . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReshape.
*/
REG_OP(SparseReshape)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(new_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseReshape)
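/*
 * Illustrative example (not part of the original header): reshaping a
 * SparseTensor of dense shape [3, 4] to [2, 6] remaps each index by its
 * flattened, row-major position:
 *   indices = [[0, 0], [0, 1], [2, 3]], shape = [3, 4], new_shape = [2, 6]
 *   flat positions 0, 1, 11  ->  y_indices = [[0, 0], [0, 1], [1, 5]], y_shape = [2, 6]
 */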
/**
*@brief Adds up a SparseTensor and a dense Tensor, using these special rules:
*(1) Broadcasts the dense side to have the same shape as the sparse side, if eligible;
*(2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseAdd.
*/
REG_OP(SparseDenseCwiseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseAdd)
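/*
 * Illustrative example (not part of the original header): with
 *   x1_indices = [[0, 1], [1, 0]], x1_values = [1, 2], x1_shape = [2, 2]
 *   x2 = [[10, 20], [30, 40]]
 * only the dense elements addressed by x1_indices take part, so
 *   y = [1 + 20, 2 + 30] = [21, 32]   (a 1D Tensor aligned with x1_values).
 */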
/**
*@brief Divides a SparseTensor by a dense Tensor . \n
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseDiv.
*/
REG_OP(SparseDenseCwiseDiv)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseDiv)
/**
*@brief Multiplies a SparseTensor by a dense Tensor . \n
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseMul.
*/
REG_OP(SparseDenseCwiseMul)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseMul)
/**
*@brief Adds a SparseTensor to a SparseTensorsMap . \n
*@par Inputs:
* The input tensor must be a SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: The values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor . \n
*@par Attributes:
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "" . \n
*@par Outputs:
*handle: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddSparseToTensorsMap.
*/
REG_OP(AddSparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handle, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddSparseToTensorsMap)
/**
*@brief The gradient operator for the SparseSlice op . \n
*@par Inputs:
* @li backprop_val_grad: A Tensor. The gradient with respect to the non-empty values of the sliced SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li new_indices: A matrix Tensor of type int64. 2D. The indices of the sliced SparseTensor . \n
*@par Outputs:
*y_grad: A Tensor. Has the same type as "backprop_val_grad" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSliceGrad.
*/
REG_OP(SparseSliceGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(new_indices, TensorType({DT_INT64}))
    .OUTPUT(y_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSliceGrad)
/**
*@brief Slices a SparseTensor based on the "start" and "size" . \n
*@par Inputs:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li size: A 1D Tensor of type int64. The size of the slice . \n
*@par Outputs:
*@li y_indices: A Tensor of type int64.
*@li y_values: A Tensor. Has the same type as "values".
*@li y_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSlice.
*/
REG_OP(SparseSlice)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(size, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseSlice)
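/*
 * Illustrative example (not part of the original header): slicing the
 * SparseTensor below with start = [0, 1] and size = [2, 2] keeps only the
 * entries whose indices fall inside that window and rebases them:
 *   indices = [[0, 0], [0, 2], [1, 1]], values = [7, 8, 9], shape = [2, 3]
 *   y_indices = [[0, 1], [1, 0]], y_values = [8, 9], y_shape = [2, 2]
 */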
/**
*@brief The gradient operator for the SparseAdd op . \n
*@par Inputs:
* @li backprop_val_grad: A 1D Tensor with shape [nnz(sum)]. The gradient with respect to the non-empty values of the sum.
* @li x1_indices: A 2D Tensor of type int64. The indices of the SparseTensor A, with size [nnz(A), ndims].
* @li x2_indices: A 2D Tensor of type int64. The indices of the SparseTensor B, with size [nnz(B), ndims].
* @li sum_indices: A 2D Tensor of type int64. The indices of the sum SparseTensor, with size [nnz(sum), ndims] . \n
*@par Outputs:
*@li x1_val_grad: A Tensor. Has the same type as "backprop_val_grad".
*@li x2_val_grad: A Tensor. Has the same type as "backprop_val_grad" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAddGrad.
*/
REG_OP(SparseAddGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(x1_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(x2_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseAddGrad)
/**
*@brief The gradient of SparseFillEmptyRows . \n
*@par Inputs:
* @li reverse_index_map: A 1D Tensor of type int64. The reverse index map from SparseFillEmptyRows.
* @li grad_values: A 1D Tensor. The gradients from backprop . \n
*@par Outputs:
*@li y_value: A Tensor. Has the same type as "grad_values".
*@li y_default_value: A Tensor. Has the same type as "grad_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRowsGrad.
*/
REG_OP(SparseFillEmptyRowsGrad)
    .INPUT(reverse_index_map, TensorType({DT_INT64}))
    .INPUT(grad_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseFillEmptyRowsGrad)
/**
*@brief Multiplies SparseTensor A (of rank 2) by dense matrix B . \n
*@par Inputs:
* @li x1_indices: A 2D Tensor of type int32 or int64.
*The indices of the SparseTensor, with size [nnz, 2].
* @li x1_values: A 1D Tensor. The values of the SparseTensor, with size [nnz].
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor, with size [2].
* @li x2: A dense matrix Tensor of the same type as "x1_values". 2D . \n
*@par Attributes:
*@li adjoint_a: An optional bool. Defaults to "False". Use the adjoint of A in the matrix multiply.
*If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A).
*@li adjoint_b: An optional bool. Defaults to "False". Use the adjoint of B in the matrix multiply.
*If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B) . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseMatMul.
*/
REG_OP(SparseTensorDenseMatMul)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \
        DT_COMPLEX64, DT_COMPLEX128, DT_FLOAT16, DT_INT64}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .ATTR(adjoint_a, Bool, false)
    .ATTR(adjoint_b, Bool, false)
    .OP_END_FACTORY_REG(SparseTensorDenseMatMul)
/**
*@brief Converts a sparse representation into a dense tensor . \n
*@par Inputs:
* @li indices: A 0D, 1D, or 2D Tensor of type int32 or int64.
* @li output_shape: A 1D Tensor of the same type as "indices". The shape of the dense output tensor.
* @li values: A 1D Tensor. Values corresponding to each row of "indices",
* or a scalar value to be used for all sparse indices.
* @li default_value: A Tensor of the same type as "values".
* The value to set for indices not specified in "indices" . \n
*@par Attributes:
* validate_indices: An optional bool. Defaults to "true".
*If true, indices are checked to make sure they are sorted in lexicographic order and contain no repeats . \n
*@par Outputs:
*y: A Tensor. Has the same type as "values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseToDense.
*/
REG_OP(SparseToDense)
    .INPUT(indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(output_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .INPUT(default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, \
        DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .ATTR(validate_indices, Bool, true)
    .OP_END_FACTORY_REG(SparseToDense)
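/*
 * Illustrative example (not part of the original header):
 *   indices = [[0, 0], [1, 2]], output_shape = [3, 4], values = [5, 6], default_value = 0
 *   y = [[5, 0, 0, 0],
 *        [0, 0, 6, 0],
 *        [0, 0, 0, 0]]
 */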
/**
*@brief Concatenates a list of `SparseTensor` objects along the specified dimension.
*Concatenation is with respect to the dense versions of these sparse tensors . \n
*@par Inputs:
*3 or 5 inputs, including:
* @li indices: A list of at least 2 `Tensor` objects with type `int64`. 2-D.
*Indices of each input `SparseTensor`. It's a dynamic input.
* @li values: A list with the same length as `indices` of `Tensor` objects with the same type.
*It's a dynamic input.
* @li shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`. 1-D.
* Shapes of each `SparseTensor`. It's a dynamic input. \n
*@par Attributes:
*@li concat_dim: An `int`. The dimension to concatenate along.
*@li N: The number of input `SparseTensor` objects.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li y_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseConcat.
*/
REG_OP(SparseConcat)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(concat_dim, Int, 0)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(SparseConcat)
/**
*@brief Adds two `SparseTensor` objects to produce another `SparseTensor` . \n
*@par Inputs:
*7 inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
* The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
* @li x1_values: A `Tensor`. Must be one of the following types: float, int8, int16, int32, int64, float64.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The `shape` of the first `SparseTensor`,
* size `[ndims]` Vector.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The `indices` of the second `SparseTensor`,
* size `[nnz, ndims]` Matrix.
* @li x2_values: A `Tensor`. Must have the same type as `x1_values`. 1-D.
* The `values` of the second `SparseTensor`, size `[nnz]` Vector.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
* The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
* @li thresh: A 0-D `Tensor`. The magnitude threshold that determines if an output value/index pair takes space . \n
*@par Outputs:
* @li sum_indices: A `Tensor` of type `int64`.
* @li sum_values: A `Tensor`. Has the same type as `x1_values`.
* @li sum_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAdd.
*/
REG_OP(SparseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .INPUT(thresh, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE}))
    .OUTPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(sum_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(sum_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseAdd)
/**
*@brief Fills empty rows in the input 2-D `SparseTensor` with a default value . \n
*@par Inputs:
*4 inputs, including:
* @li indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li dense_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li default_value: A `Tensor`. Must have the same type as `values`.
*0-D. The default value to insert into location `[row, 0, ..., 0]`
*for rows missing from the input sparse tensor . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li empty_row_indicator: A `Tensor` of type `bool`.
* @li reverse_index_map: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRows.
*/
REG_OP(SparseFillEmptyRows)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(dense_shape, TensorType({DT_INT64}))
    .INPUT(default_value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(empty_row_indicator, TensorType({DT_BOOL}))
    .OUTPUT(reverse_index_map, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseFillEmptyRows)
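/*
 * Illustrative example (not part of the original header): with
 *   indices = [[0, 1], [2, 0]], values = [3, 4], dense_shape = [4, 2], default_value = 9
 * rows 1 and 3 are empty, so each receives the default value at column 0:
 *   y_indices = [[0, 1], [1, 0], [2, 0], [3, 0]], y_values = [3, 9, 4, 9]
 *   empty_row_indicator = [false, true, false, true]
 */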
/**
*@brief Returns the element-wise max of two SparseTensors . \n
*@par Inputs:
*6 inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a SparseTensor,
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the first sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the first sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the second sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMaximum.
*/
REG_OP(SparseSparseMaximum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSparseMaximum)
/**
*@brief Returns the element-wise min of two SparseTensors . \n
*@par Inputs:
*6 inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a SparseTensor,
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the first sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the first sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the second sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMinimum.
*/
REG_OP(SparseSparseMinimum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSparseMinimum)
/**
*@brief Computes the max of elements across dimensions of a SparseTensor . \n
*@par Inputs:
*4 or 5 inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor.
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D.
*Length-`K` vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`.
*If true, retain reduced dimensions with length 1 . \n
*@par Outputs:
* y: A `Tensor`. Has the same type as `x_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMax.
*/
REG_OP(SparseReduceMax)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMax)
/**
*@brief Computes the max of elements across dimensions of a SparseTensor, returning a SparseTensor . \n
*@par Inputs:
*4 or 5 inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor.
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D.
*Length-`K` vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`.
*If true, retain reduced dimensions with length 1 . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x_values`.
* @li y_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMaxSparse.
*/
REG_OP(SparseReduceMaxSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMaxSparse)
/**
*@brief Computes the sum of elements across dimensions of a SparseTensor . \n
*@par Inputs:
*4 or 5 inputs, including:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32.
*A length-"K" vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False".
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
* y: A Tensor. Has the same type as "x_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSum.
*/
REG_OP(SparseReduceSum)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSum)
/**
*@brief Computes the sum of elements across dimensions of a SparseTensor, returning a SparseTensor . \n
*@par Inputs:
*4 or 5 inputs, including:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32.
* A length-"K" vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False".
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
* @li y_indices: A Tensor of type int64.
* @li y_values: A Tensor. Has the same type as "x_values".
* @li y_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSumSparse.
*/
REG_OP(SparseReduceSumSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSumSparse)
/**
*@brief Splits a SparseTensor into "num_split" tensors along one dimension . \n
*@par Inputs:
*4 or 5 inputs, including:
* @li split_dim: A 0D Tensor of type int64.
*The dimension along which to split. Must be in the range "[0, rank(shape))".
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. Shape of the SparseTensor . \n
*@par Attributes:
* num_split: An int that is >= 1. The number of ways to split . \n
*@par Outputs:
* @li y_indices: A list of "num_split" Tensor objects of type int64.
* @li y_values: A list of "num_split" Tensor objects with the same type as "values".
* @li y_shape: A list of "num_split" Tensor objects of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSplit.
*/
REG_OP(SparseSplit)
    .INPUT(split_dim, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_indices, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, \
        DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .DYNAMIC_OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(num_split, Int, 1)
    .OP_END_FACTORY_REG(SparseSplit)
/**
*@brief Generates sparse cross from a list of sparse and dense tensors . \n
*@par Inputs:
*8 or 10 inputs, including:
* @li indices: A list of 2D Tensor objects of type int64.
* Indices of each input SparseTensor. It's a dynamic input.
* @li values: A list of 1D Tensor objects of type int64 or string.
* Values of each SparseTensor. It's a dynamic input.
* @li shapes: A list with the same length as "indices" of 1D Tensor objects of type int64.
* Shapes of each SparseTensor. It's a dynamic input.
* @li dense_inputs: A list of 2D Tensor objects of type int64 or string.
* Columns represented by dense Tensor. It's a dynamic input. \n
*@par Attributes:
* @li N: The number of sparse inputs.
* @li hashed_output: A bool. If true, returns the hash of the cross instead of the string.
* @li num_buckets: An int that is >= 0. It is used if "hashed_output" is true.
*output = hashed_value%num_buckets if num_buckets > 0 else "hashed_value".
* @li hash_key: An int. Specifies the hash_key that will be used by the "FingerprintCat64"
*function to combine the crosses fingerprints.
* @li out_type: An int64 or string.
* @li internal_type: An int64 or string . \n
*@par Outputs:
* @li output_indices: A Tensor of type int64.
* @li output_values: A Tensor of type "out_type".
* @li output_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseCross.
*/
REG_OP(SparseCross)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values, TensorType({DT_INT64, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(dense_inputs, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_indices, TensorType({DT_INT64}))
    .OUTPUT(output_values, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_shape, TensorType({DT_INT64}))
    .ATTR(N, Int, 0)
    .REQUIRED_ATTR(hashed_output, Bool)
    .ATTR(num_buckets, Int, 0)
    .REQUIRED_ATTR(hash_key, Int)
    .REQUIRED_ATTR(out_type, Type)
    .REQUIRED_ATTR(internal_type, Type)
    .OP_END_FACTORY_REG(SparseCross)
/**
*@brief Adds an N-minibatch SparseTensor to a SparseTensorsMap, returning N handles . \n
*@par Inputs:
*3 or 5 inputs, including:
* @li indices: A 2D Tensor of type int64.
* The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor . \n
*@par Attributes:
* @li container: An optional string. Defaults to "".
*The container name for the "SparseTensorsMap" created by this op.
* @li shared_name: An optional string. Defaults to "".
*The shared name for the "SparseTensorsMap" created by this op . \n
*@par Outputs:
* handles: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddManySparseToTensorsMap.
*/
REG_OP(AddManySparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handles, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddManySparseToTensorsMap)
/**
*@brief Reads SparseTensors from a "SparseTensorsMap" and concatenates them . \n
*@par Inputs:
*One input:
* handles: A 1D Tensor of type int64.
* The "N" serialized SparseTensor objects . \n
*@par Attributes:
* @li dtype: A required type. The "dtype" of the SparseTensor objects stored in the "SparseTensorsMap".
* @li container: An optional string. Defaults to "".
*The container name for the "SparseTensorsMap" read by this op.
* @li shared_name: An optional string. Defaults to "".
*The shared name for the "SparseTensorsMap" read by this op . \n
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator TakeManySparseFromTensorsMap.
*/
REG_OP(TakeManySparseFromTensorsMap)
    .INPUT(handles, TensorType({DT_INT64}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(TakeManySparseFromTensorsMap)
/**
*@brief Serializes a SparseTensor into a [3] Tensor object . \n
*@par Inputs:
*3 or 4 inputs, including:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor . \n
*@par Attributes:
* out_type: An optional type. Defaults to "string" . \n
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeSparse.
*/
REG_OP(SerializeSparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeSparse)
/**
*@brief Serializes an "N"-minibatch SparseTensor into an [N, 3] Tensor object . \n
*@par Inputs:
*3 or 4 inputs, including:
* @li indices: A 2D Tensor of type int64. The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor . \n
*@par Attributes:
* out_type: An optional type. Defaults to "string" . \n
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeManySparse.
*/
REG_OP(SerializeManySparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeManySparse)
/**
*@brief Deserializes SparseTensor objects . \n
*@par Inputs:
*One input:
* serialized_sparse: A Tensor. The serialized SparseTensor objects.
*The last dimension must have 3 columns . \n
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects . \n
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeSparse.
*/
REG_OP(DeserializeSparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeSparse)
/**
*@brief Deserializes and concatenates SparseTensors from a serialized minibatch . \n
*@par Inputs:
*One input:
* serialized_sparse: A 2D Tensor of type string.
*The "N" serialized SparseTensor objects. Must have 3 columns . \n
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects . \n
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeManySparse.
*/
REG_OP(DeserializeManySparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeManySparse)
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore. Its code is implemented in C++, and it sits between the front-end module ME and the underlying hardware, acting as a bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE is specifically optimized for the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is not visible to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.
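To make the registrations above concrete, here is a minimal, hedged sketch of how one operator declared in sparse_ops.h might be wired into a GE graph through the classes that REG_OP generates. The set_input_* / set_attr_* accessors, the "all_ops.h" include, and ge::op::Data follow the usual REG_OP/graph-construction conventions and are assumptions here, not guarantees about this particular code base.

// Sketch only: assumes the generated operator classes and the standard
// ge::Graph construction API are available in this build.
#include <vector>
#include "graph/graph.h"
#include "all_ops.h"  // assumed aggregate header for generated op classes

ge::Graph BuildSparseToDenseGraph() {
  // Graph inputs fed at runtime; Data is the conventional placeholder op.
  auto indices = ge::op::Data("indices");
  auto output_shape = ge::op::Data("output_shape");
  auto values = ge::op::Data("values");
  auto default_value = ge::op::Data("default_value");

  // Wire SparseToDense exactly as declared by REG_OP(SparseToDense) above.
  auto sparse_to_dense = ge::op::SparseToDense("sparse_to_dense")
                             .set_input_indices(indices)
                             .set_input_output_shape(output_shape)
                             .set_input_values(values)
                             .set_input_default_value(default_value)
                             .set_attr_validate_indices(true);

  std::vector<ge::Operator> inputs{indices, output_shape, values, default_value};
  std::vector<ge::Operator> outputs{sparse_to_dense};

  ge::Graph graph("sparse_to_dense_graph");
  graph.SetInputs(inputs).SetOutputs(outputs);
  return graph;
}

The resulting graph would then be handed to GE (for example through a GE session) for the optimization and offline compilation flow described above.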