
sparse_ops.h 45 kB

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file sparse_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Applies softmax to a batched ND SparseTensor . \n
*@par Inputs:
*The input must be a batched ND SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: A vector Tensor of type float or double. 1D. The values of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor . \n
*@par Outputs:
*y: A vector Tensor. 1D. Has the same type as "values" . \n
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SparseSoftmax.
*/
REG_OP(SparseSoftmax)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSoftmax)
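
/*
 * A minimal standalone sketch of the SparseSoftmax semantics described above (illustrative
 * only, not the GE graph-construction API; the helper name sparse_row_softmax is an
 * assumption): for a 2-D SparseTensor in COO form, the softmax is computed per row over the
 * non-empty values only, so the non-empty entries of each row of the result sum to 1.
 */
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdint>
#include <map>
#include <vector>

inline std::vector<float> sparse_row_softmax(const std::vector<std::array<int64_t, 2>>& indices,
                                             const std::vector<float>& values) {
  // Group the positions of the non-empty values by row index.
  std::map<int64_t, std::vector<size_t>> rows;
  for (size_t i = 0; i < indices.size(); ++i) {
    rows[indices[i][0]].push_back(i);
  }
  // Numerically stable softmax within each row, written back in the original entry order.
  std::vector<float> out(values.size());
  for (const auto& kv : rows) {
    const std::vector<size_t>& pos = kv.second;
    float max_v = values[pos[0]];
    for (size_t p : pos) max_v = std::max(max_v, values[p]);
    float sum = 0.0f;
    for (size_t p : pos) sum += std::exp(values[p] - max_v);
    for (size_t p : pos) out[p] = std::exp(values[p] - max_v) / sum;
  }
  return out;
}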

/**
*@brief Adds up a SparseTensor and a dense Tensor, producing a dense Tensor . \n
*@par Inputs:
*Inputs "x1_*" must be SparseTensors and "x2" must be a dense Tensor.
* @li x1_indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor.
* @li x2: A matrix Tensor. Has the same type and same shape as the SparseTensors . \n
*@par Outputs:
*y: A matrix Tensor. Has the same type and same shape as "x2" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseAdd.
*/
REG_OP(SparseTensorDenseAdd)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x2, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseTensorDenseAdd)

/**
*@brief Reorders a SparseTensor into the canonical, row-major ordering . \n
*@par Inputs:
* @li indices: A matrix Tensor of type int32 or int64. 2D. The indices of the SparseTensor.
* @li values: Values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A vector Tensor of type int32 or int64. 1D. The shape of the SparseTensor . \n
*@par Outputs:
*@li y_indices: The indices of the SparseTensor. Has the same type as "indices".
*@li y_values: The values of the SparseTensor. Has the same type as "values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReorder.
*/
REG_OP(SparseReorder)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseReorder)
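
/*
 * A minimal standalone sketch of the reordering described above (illustrative only; the helper
 * name sparse_reorder_coo is an assumption): COO entries are permuted into canonical row-major
 * (lexicographic) index order while keeping each index paired with its value.
 */
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

inline void sparse_reorder_coo(std::vector<std::vector<int64_t>>& indices,
                               std::vector<float>& values) {
  // Sort a permutation of entry positions by lexicographic comparison of the index tuples.
  std::vector<size_t> perm(indices.size());
  std::iota(perm.begin(), perm.end(), 0);
  std::sort(perm.begin(), perm.end(),
            [&](size_t a, size_t b) { return indices[a] < indices[b]; });
  // Apply the permutation to both the indices and the values.
  std::vector<std::vector<int64_t>> sorted_indices;
  std::vector<float> sorted_values;
  for (size_t p : perm) {
    sorted_indices.push_back(indices[p]);
    sorted_values.push_back(values[p]);
  }
  indices = std::move(sorted_indices);
  values = std::move(sorted_values);
}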

/**
*@brief Reshapes a SparseTensor to represent values in a new dense shape . \n
*@par Inputs:
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li shape: A vector Tensor of type int64. 1D. The shape of the SparseTensor.
* @li new_shape: A 1D Tensor of type int64. The requested new dense shape . \n
*@par Outputs:
*@li y_indices: A Tensor of type int64. The indices of the new dense shape.
*@li y_shape: A Tensor of type int64. The shape of the new dense shape . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReshape.
*/
REG_OP(SparseReshape)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(new_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseReshape)
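
/*
 * A minimal standalone sketch of the index arithmetic behind SparseReshape (illustrative only;
 * assumes "new_shape" is fully specified, with no -1 wildcard, and the helper name
 * sparse_reshape_indices is an assumption): each index is flattened to a row-major linear
 * offset under the old shape and unflattened under the new shape; the stored values themselves
 * are unchanged.
 */
#include <cstdint>
#include <vector>

inline std::vector<std::vector<int64_t>> sparse_reshape_indices(
    const std::vector<std::vector<int64_t>>& indices,
    const std::vector<int64_t>& shape,
    const std::vector<int64_t>& new_shape) {
  std::vector<std::vector<int64_t>> y_indices;
  for (const std::vector<int64_t>& idx : indices) {
    // Row-major linear offset under the old dense shape.
    int64_t flat = 0;
    for (size_t d = 0; d < shape.size(); ++d) flat = flat * shape[d] + idx[d];
    // Unflatten the offset under the requested new dense shape.
    std::vector<int64_t> new_idx(new_shape.size());
    for (size_t d = new_shape.size(); d-- > 0;) {
      new_idx[d] = flat % new_shape[d];
      flat /= new_shape[d];
    }
    y_indices.push_back(new_idx);
  }
  return y_indices;
}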

/**
*@brief Adds up a SparseTensor and a dense Tensor.
*@par Inputs:
*(1) Broadcasts the dense side to have the same shape as the sparse side, if eligible;
*(2) Then, only the dense values pointed to by the indices of the SparseTensor participate in the cwise addition.
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseAdd.
*/
REG_OP(SparseDenseCwiseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseAdd)
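
/*
 * A minimal standalone sketch of the behaviour documented above for SparseDenseCwiseAdd
 * (illustrative only; broadcasting of the dense operand is omitted and the helper name
 * sparse_dense_cwise_add is an assumption): only the dense entries addressed by the sparse
 * indices take part, and the result keeps the sparse layout of "x1".
 */
#include <array>
#include <cstdint>
#include <vector>

inline std::vector<float> sparse_dense_cwise_add(
    const std::vector<std::array<int64_t, 2>>& x1_indices,
    const std::vector<float>& x1_values,
    const std::vector<std::vector<float>>& x2) {  // dense operand with the same 2-D shape
  std::vector<float> y(x1_values.size());
  for (size_t i = 0; i < x1_indices.size(); ++i) {
    // Gather the dense value at the sparse coordinate, then add element-wise.
    y[i] = x1_values[i] + x2[x1_indices[i][0]][x1_indices[i][1]];
  }
  return y;
}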

/**
*@brief Divides a SparseTensor by a dense Tensor . \n
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseDiv.
*/
REG_OP(SparseDenseCwiseDiv)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseDiv)

/**
*@brief Multiplies a SparseTensor by a dense Tensor . \n
*@par Inputs:
* @li x1_indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li x1_values: The values of the SparseTensor. A vector Tensor. 1D.
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li x2: A dense Tensor of the same type as "x1_values" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseDenseCwiseMul.
*/
REG_OP(SparseDenseCwiseMul)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, \
        DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseDenseCwiseMul)

/**
*@brief Adds a SparseTensor to a SparseTensorsMap . \n
*@par Inputs:
* The input tensor must be a SparseTensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li values: The values of the SparseTensor. A vector Tensor. 1D.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor . \n
*@par Attributes:
*@li container: An optional string. Defaults to "".
*@li shared_name: An optional string. Defaults to "" . \n
*@par Outputs:
*handle: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddSparseToTensorsMap.
*/
REG_OP(AddSparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handle, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddSparseToTensorsMap)

/**
*@brief The gradient operator for the SparseSlice op . \n
*@par Inputs:
* @li backprop_val_grad: A Tensor.
* @li indices: A matrix Tensor of type int64. 2D. The indices of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li new_indices: A matrix Tensor of type int64. 2D. The indices of the sliced SparseTensor . \n
*@par Outputs:
*y_grad: A Tensor. Has the same type as "backprop_val_grad" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSliceGrad.
*/
REG_OP(SparseSliceGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(new_indices, TensorType({DT_INT64}))
    .OUTPUT(y_grad, TensorType({DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
        DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSliceGrad)

/**
*@brief Slices a SparseTensor based on the "start" and "size" . \n
*@par Inputs:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor.
* @li start: A 1D Tensor of type int64. The start of the slice.
* @li size: A 1D Tensor of type int64. The size of the slice . \n
*@par Outputs:
*@li y_indices: A Tensor of type int64.
*@li y_values: A Tensor. Has the same type as "values".
*@li y_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSlice.
*/
REG_OP(SparseSlice)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .INPUT(start, TensorType({DT_INT64}))
    .INPUT(size, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseSlice)
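
/*
 * A minimal standalone sketch of the slicing rule documented above (illustrative only; the
 * helper name sparse_slice_coo is an assumption): an entry is kept when its index lies inside
 * the window [start, start + size) in every dimension, and kept indices are shifted so the
 * slice starts at zero.
 */
#include <cstdint>
#include <vector>

inline void sparse_slice_coo(const std::vector<std::vector<int64_t>>& indices,
                             const std::vector<float>& values,
                             const std::vector<int64_t>& start,
                             const std::vector<int64_t>& size,
                             std::vector<std::vector<int64_t>>* y_indices,
                             std::vector<float>* y_values) {
  for (size_t i = 0; i < indices.size(); ++i) {
    bool inside = true;
    std::vector<int64_t> shifted(indices[i].size());
    for (size_t d = 0; d < indices[i].size(); ++d) {
      shifted[d] = indices[i][d] - start[d];
      inside = inside && shifted[d] >= 0 && shifted[d] < size[d];
    }
    if (inside) {
      y_indices->push_back(shifted);
      y_values->push_back(values[i]);
    }
  }
}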

/**
*@brief The gradient operator for the SparseAdd op . \n
*@par Inputs:
* @li backprop_val_grad: A 1D Tensor with shape [nnz(sum)]. The gradient with respect to the non-empty values of the sum.
* @li x1_indices: A 2D Tensor of type int64. The indices of the SparseTensor A, with size [nnz(A), ndims].
* @li x2_indices: A 2D Tensor of type int64. The indices of the SparseTensor B, with size [nnz(B), ndims].
* @li sum_indices: A 2D Tensor of type int64. The indices of the sum SparseTensor, with size [nnz(sum), ndims] . \n
*@par Outputs:
*@li x1_val_grad: A Tensor. Has the same type as "backprop_val_grad".
*@li x2_val_grad: A Tensor. Has the same type as "backprop_val_grad" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAddGrad.
*/
REG_OP(SparseAddGrad)
    .INPUT(backprop_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(x1_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(x2_val_grad, TensorType({DT_INT8, DT_INT16, DT_INT32,
        DT_INT64, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseAddGrad)

/**
*@brief The gradient of SparseFillEmptyRows . \n
*@par Inputs:
* @li reverse_index_map: A 1D Tensor of type int64. The reverse index map from SparseFillEmptyRows.
* @li grad_values: A 1D Tensor. The gradients from backprop . \n
*@par Outputs:
*@li y_value: A Tensor. Has the same type as "grad_values".
*@li y_default_value: A Tensor. Has the same type as "grad_values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRowsGrad.
*/
REG_OP(SparseFillEmptyRowsGrad)
    .INPUT(reverse_index_map, TensorType({DT_INT64}))
    .INPUT(grad_values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OP_END_FACTORY_REG(SparseFillEmptyRowsGrad)

/**
*@brief Multiplies SparseTensor A (of rank 2) by dense matrix B . \n
*@par Inputs:
* @li x1_indices: A 2D Tensor of type int32 or int64.
*The indices of the matrix "SparseTensor", with size [nnz, 2].
* @li x1_values: A 1D Tensor. The values of the SparseTensor, with size [nnz].
* @li x1_shape: A 1D Tensor of type int64. The shape of the SparseTensor, with size [2].
* @li x2: A dense matrix Tensor of the same type as "x1_values". 2D . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x1_values" . \n
*@par Attributes:
*@li adjoint_a: An optional bool. Defaults to "False". Use the adjoint of A in the matrix multiply.
*If A is complex, this is transpose(conj(A)). Otherwise it is transpose(A).
*@li adjoint_b: An optional bool. Defaults to "False". Use the adjoint of B in the matrix multiply.
*If B is complex, this is transpose(conj(B)). Otherwise it is transpose(B) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseTensorDenseMatMul.
*/
REG_OP(SparseTensorDenseMatMul)
    .INPUT(x1_indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, \
        DT_COMPLEX64, DT_COMPLEX128, DT_FLOAT16, DT_INT64}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT64, DT_INT32, DT_COMPLEX64, \
        DT_COMPLEX128, DT_FLOAT16}))
    .ATTR(adjoint_a, Bool, false)
    .ATTR(adjoint_b, Bool, false)
    .OP_END_FACTORY_REG(SparseTensorDenseMatMul)
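
/*
 * A minimal standalone sketch of the product computed above for the default case
 * adjoint_a = adjoint_b = false (illustrative only; the helper name sparse_dense_matmul is an
 * assumption): each stored value A[i, j] contributes value * B[j, :] to row i of the result.
 */
#include <array>
#include <cstdint>
#include <vector>

inline std::vector<std::vector<float>> sparse_dense_matmul(
    const std::vector<std::array<int64_t, 2>>& x1_indices,
    const std::vector<float>& x1_values,
    const std::array<int64_t, 2>& x1_shape,           // {rows of A, cols of A}
    const std::vector<std::vector<float>>& x2) {      // dense B, cols(A) x N
  const size_t n = x2.empty() ? 0 : x2[0].size();
  std::vector<std::vector<float>> y(x1_shape[0], std::vector<float>(n, 0.0f));
  for (size_t k = 0; k < x1_values.size(); ++k) {
    const int64_t i = x1_indices[k][0];
    const int64_t j = x1_indices[k][1];
    for (size_t col = 0; col < n; ++col) {
      y[i][col] += x1_values[k] * x2[j][col];
    }
  }
  return y;
}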

/**
*@brief Converts a sparse representation into a dense tensor . \n
*@par Inputs:
* @li indices: A 0D, 1D, or 2D Tensor of type int32 or int64.
* @li output_shape: A 1D Tensor of the same type as "indices". The shape of the dense output tensor.
* @li values: A 1D Tensor. Values corresponding to each row of "indices",
*or a scalar value to be used for all sparse indices.
* @li default_value: A Tensor of the same type as "values" . \n
*@par Attributes:
*validate_indices: If true, indices are checked to make sure they are sorted in
*lexicographic order and that there are no repeats. \n
*@par Outputs:
*y: A Tensor. Has the same type as "values" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseToDense.
*/
REG_OP(SparseToDense)
    .INPUT(indices, TensorType({DT_INT32, DT_INT64}))
    .INPUT(output_shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .INPUT(default_value, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, \
        DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_BOOL, DT_DOUBLE}))
    .ATTR(validate_indices, Bool, true)
    .OP_END_FACTORY_REG(SparseToDense)
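
/*
 * A minimal standalone sketch of SparseToDense for the 2-D case (illustrative only; the helper
 * name sparse_to_dense_2d is an assumption): values are scattered to their coordinates and every
 * other position receives default_value. For indices {{0, 0}, {1, 2}}, values {5, 7},
 * output_shape {3, 4} and default_value 0 this produces
 * [[5, 0, 0, 0], [0, 0, 7, 0], [0, 0, 0, 0]].
 */
#include <array>
#include <cstdint>
#include <vector>

inline std::vector<std::vector<float>> sparse_to_dense_2d(
    const std::vector<std::array<int64_t, 2>>& indices,
    const std::vector<float>& values,
    const std::array<int64_t, 2>& output_shape,
    float default_value) {
  std::vector<std::vector<float>> y(output_shape[0],
                                    std::vector<float>(output_shape[1], default_value));
  for (size_t i = 0; i < indices.size(); ++i) {
    y[indices[i][0]][indices[i][1]] = values[i];
  }
  return y;
}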

/**
*@brief Concatenates a list of `SparseTensor` along the specified dimension.
*Concatenation is with respect to the dense versions of these sparse tensors . \n
*@par Inputs:
* @li indices: A list of at least 2 `Tensor` objects with type `int64`. 2-D.
*Indices of each input `SparseTensor`. It's a dynamic input.
* @li values: A list with the same length as `indices` of `Tensor` objects with the same type.
*It's a dynamic input.
* @li shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`. 1-D.
* Shapes of each `SparseTensor`. It's a dynamic input. \n
*@par Attributes:
*@li concat_dim: An `int`. The dimension to concatenate along.
*@li N: The number of `SparseTensor` inputs.
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li y_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseConcat.
*/
REG_OP(SparseConcat)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
        DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(concat_dim, Int, 0)
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(SparseConcat)

/**
*@brief Adds two `SparseTensor` objects to produce another `SparseTensor` . \n
*@par Inputs:
*Seven inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
* The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
* @li x1_values: A `Tensor`. Must be one of the following types: float, int8, int16, int32, int64, float64.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The `shape` of the first `SparseTensor`,
* size `[ndims]` Vector.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The `indices` of the second `SparseTensor`,
* size `[nnz, ndims]` Matrix.
* @li x2_values: A `Tensor`. Must have the same type as `x1_values`. 1-D.
* The `values` of the second `SparseTensor`, size `[nnz]` Vector.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
* The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
* @li thresh: A 0-D `Tensor`. The magnitude threshold that determines if an output value/index pair takes space . \n
*@par Outputs:
* @li sum_indices: A `Tensor` of type `int64`.
* @li sum_values: A `Tensor`. Has the same type as `x1_values`.
* @li sum_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseAdd.
*/
REG_OP(SparseAdd)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .INPUT(thresh, TensorType({DT_FLOAT, DT_INT8, DT_INT16, DT_INT32, \
        DT_INT64, DT_DOUBLE}))
    .OUTPUT(sum_indices, TensorType({DT_INT64}))
    .OUTPUT(sum_values, TensorType({DT_FLOAT, DT_INT8, DT_INT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(sum_shape, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseAdd)

/**
*@brief Fills empty rows in the input 2-D `SparseTensor` with a default value . \n
*@par Inputs:
*Four inputs, including:
* @li indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li dense_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li default_value: A `Tensor`. Must have the same type as `values`.
*0-D. The default value to insert into location `[row, 0, ..., 0]`
*for rows missing from the input sparse tensor . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `values`.
* @li empty_row_indicator: A `Tensor` of type `bool`.
* @li reverse_index_map: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseFillEmptyRows.
*/
REG_OP(SparseFillEmptyRows)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(dense_shape, TensorType({DT_INT64}))
    .INPUT(default_value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, \
        DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(empty_row_indicator, TensorType({DT_BOOL}))
    .OUTPUT(reverse_index_map, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(SparseFillEmptyRows)
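
/*
 * A minimal standalone sketch of the fill rule documented above (illustrative only; the helper
 * name sparse_fill_empty_rows_2d is an assumption): every row of the 2-D tensor that has no
 * entry receives a single default value at column 0, and empty_row_indicator marks which rows
 * were filled. Re-sorting into row-major order and the reverse_index_map output are omitted.
 */
#include <array>
#include <cstdint>
#include <vector>

inline void sparse_fill_empty_rows_2d(std::vector<std::array<int64_t, 2>>& indices,
                                      std::vector<float>& values,
                                      const std::array<int64_t, 2>& dense_shape,
                                      float default_value,
                                      std::vector<bool>* empty_row_indicator) {
  std::vector<bool> has_entry(dense_shape[0], false);
  for (const auto& idx : indices) has_entry[idx[0]] = true;
  empty_row_indicator->assign(dense_shape[0], false);
  for (int64_t row = 0; row < dense_shape[0]; ++row) {
    if (!has_entry[row]) {
      indices.push_back({row, 0});
      values.push_back(default_value);
      (*empty_row_indicator)[row] = true;
    }
  }
}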

/**
*@brief Returns the element-wise max of two SparseTensors . \n
*@par Inputs:
*Six inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a SparseTensor,
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMaximum.
*/
REG_OP(SparseSparseMaximum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OP_END_FACTORY_REG(SparseSparseMaximum)

/**
*@brief Returns the element-wise min of two SparseTensors . \n
*@par Inputs:
*Six inputs, including:
* @li x1_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a SparseTensor,
* in the canonical lexicographic ordering.
* @li x1_values: A `Tensor`. 1-D. The values of the sparse tensor.
* @li x1_shape: A `Tensor` of type `int64`. 1-D. The shape of the sparse tensor.
* @li x2_indices: A `Tensor` of type `int64`. 2-D. The indices of the sparse tensor.
* @li x2_values: A `Tensor`. 1-D. Must have the same type as `x1_values`.
* @li x2_shape: A `Tensor` of type `int64`. 1-D.
*Counterpart to `x1_shape` for the other operand; the two shapes must be equal . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x1_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSparseMinimum.
*/
REG_OP(SparseSparseMinimum)
    .INPUT(x1_indices, TensorType({DT_INT64}))
    .INPUT(x1_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x1_shape, TensorType({DT_INT64}))
    .INPUT(x2_indices, TensorType({DT_INT64}))
    .INPUT(x2_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x2_shape, TensorType({DT_INT64}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, \
        DT_UINT16, DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, \
        DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(SparseSparseMinimum)

/**
*@brief Computes the max of elements across dimensions of a SparseTensor . \n
*@par Inputs:
*Four inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor.
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D.
*Length-`K` vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`.
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
* y: A `Tensor`. Has the same type as `x_values` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMax.
*/
REG_OP(SparseReduceMax)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMax)

/**
*@brief Computes the max of elements across dimensions of a SparseTensor, returning a SparseTensor . \n
*@par Inputs:
*Four inputs, including:
* @li x_indices: A `Tensor` of type `int64`. 2-D.
*`N x R` matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A `Tensor`. 1-D. The values of the sparse tensor.
*`N` non-empty values corresponding to `x_indices`.
* @li x_shape: A `Tensor` of type `int64`. 1-D. Shape of the input SparseTensor.
* @li reduction_axes: A `Tensor` of type `int32`. 1-D.
*Length-`K` vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional `bool`. Defaults to `False`.
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
* @li y_indices: A `Tensor` of type `int64`.
* @li y_values: A `Tensor`. Has the same type as `x_values`.
* @li y_shape: A `Tensor` of type `int64` . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceMaxSparse.
*/
REG_OP(SparseReduceMaxSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceMaxSparse)

/**
*@brief Computes the sum of elements across dimensions of a SparseTensor . \n
*@par Inputs:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32.
*A length-"K" vector containing the reduction axes . \n
*@par Attributes:
*keep_dims: An optional bool. Defaults to "False".
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x_values". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSum.
*/
REG_OP(SparseReduceSum)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSum)
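
/*
 * A minimal standalone sketch of SparseReduceSum for the common 2-D case with
 * reduction_axes = {1} and keep_dims = false (illustrative only; the helper name
 * sparse_reduce_sum_rows is an assumption): the dense result is a vector of per-row sums over
 * the non-empty values, with rows that hold no entries summing to 0.
 */
#include <array>
#include <cstdint>
#include <vector>

inline std::vector<float> sparse_reduce_sum_rows(
    const std::vector<std::array<int64_t, 2>>& x_indices,
    const std::vector<float>& x_values,
    const std::array<int64_t, 2>& x_shape) {
  std::vector<float> y(x_shape[0], 0.0f);
  for (size_t i = 0; i < x_indices.size(); ++i) {
    y[x_indices[i][0]] += x_values[i];
  }
  return y;
}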

/**
*@brief Computes the sum of elements across dimensions of a SparseTensor, returning a SparseTensor . \n
*@par Inputs:
*Four inputs, including:
* @li x_indices: A 2D Tensor of type int64.
*"N x R" matrix with the indices of non-empty values in a
*SparseTensor, possibly not in canonical ordering.
* @li x_values: A 1D Tensor. The values of the SparseTensor.
*"N" non-empty values corresponding to "x_indices".
* @li x_shape: A 1D Tensor of type int64. Shape of the input SparseTensor.
* @li reduction_axes: A 1D Tensor of type int32.
* A length-"K" vector containing the reduction axes . \n
*@par Attributes:
* keep_dims: An optional bool. Defaults to "False".
*If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
* @li y_indices: A Tensor of type int64.
* @li y_values: A Tensor. Has the same type as "x_values".
* @li y_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseReduceSumSparse.
*/
REG_OP(SparseReduceSumSparse)
    .INPUT(x_indices, TensorType({DT_INT64}))
    .INPUT(x_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .INPUT(x_shape, TensorType({DT_INT64}))
    .INPUT(reduction_axes, TensorType({DT_INT32}))
    .OUTPUT(y_indices, TensorType({DT_INT64}))
    .OUTPUT(y_values, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SparseReduceSumSparse)

/**
*@brief Splits a SparseTensor into "num_split" tensors along one dimension . \n
*@par Inputs:
*Four inputs, including:
* @li split_dim: A 0D Tensor of type int64.
*The dimension along which to split. Must be in the range "[0, rank(shape))".
* @li indices: A 2D Tensor of type int64.
* The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. Shape of the SparseTensor . \n
*@par Attributes:
* num_split: An int that is >= 1. The number of ways to split . \n
*@par Outputs:
* @li y_indices: A list of "num_split" Tensor objects of type int64.
* @li y_values: A list of "num_split" Tensor objects with the same type as "values".
* @li y_shape: A list of "num_split" Tensor objects of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseSplit.
*/
REG_OP(SparseSplit)
    .INPUT(split_dim, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16, \
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, \
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .INPUT(shape, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_indices, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(y_values, TensorType({DT_INT64, DT_INT32, DT_UINT16, \
        DT_INT16, DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .DYNAMIC_OUTPUT(y_shape, TensorType({DT_INT64}))
    .ATTR(num_split, Int, 1)
    .OP_END_FACTORY_REG(SparseSplit)

/**
*@brief Generates sparse cross from a list of sparse and dense tensors . \n
*@par Inputs:
* @li indices: A list of 2D Tensor objects of type int64.
* Indices of each input SparseTensor. It's a dynamic input.
* @li values: A list of 1D Tensor objects of type int64 or string.
* Values of each SparseTensor. It's a dynamic input.
* @li shapes: A list with the same length as "indices" of 1D Tensor objects of type int64.
* Shapes of each SparseTensor. It's a dynamic input.
* @li dense_inputs: A list of 2D Tensor objects of type int64 or string.
* Columns represented by dense Tensor. It's a dynamic input. \n
*@par Attributes:
* @li N: The number of sparse inputs.
* @li hashed_output: A bool. If true, returns the hash of the cross instead of the string.
* @li num_buckets: An int that is >= 0. It is used if "hashed_output" is true.
*output = hashed_value % num_buckets if num_buckets > 0, else "hashed_value".
* @li hash_key: An int. Specifies the hash_key that will be used by the "FingerprintCat64"
*function to combine the cross fingerprints.
* @li out_type: An int64 or string.
* @li internal_type: An int64 or string . \n
*@par Outputs:
* @li output_indices: A Tensor of type int64.
* @li output_values: A Tensor of type "out_type".
* @li output_shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SparseCross.
*/
REG_OP(SparseCross)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(values, TensorType({DT_INT64, DT_STRING}))
    .DYNAMIC_INPUT(shapes, TensorType({DT_INT64}))
    .DYNAMIC_INPUT(dense_inputs, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_indices, TensorType({DT_INT64}))
    .OUTPUT(output_values, TensorType({DT_INT64, DT_STRING}))
    .OUTPUT(output_shape, TensorType({DT_INT64}))
    .ATTR(N, Int, 0)
    .REQUIRED_ATTR(hashed_output, Bool)
    .ATTR(num_buckets, Int, 0)
    .REQUIRED_ATTR(hash_key, Int)
    .REQUIRED_ATTR(out_type, Type)
    .REQUIRED_ATTR(internal_type, Type)
    .OP_END_FACTORY_REG(SparseCross)

/**
*@brief Adds an "N"-minibatch SparseTensor to a SparseTensorsMap, returning "N" handles . \n
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64.
* The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor . \n
*@par Attributes:
* @li container: An optional string. Defaults to "".
*The container name for the "SparseTensorsMap" created by this op.
* @li shared_name: An optional string. Defaults to "".
*The shared name for the "SparseTensorsMap" created by this op . \n
*@par Outputs:
* handles: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AddManySparseToTensorsMap.
*/
REG_OP(AddManySparseToTensorsMap)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(handles, TensorType({DT_INT64}))
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(AddManySparseToTensorsMap)

/**
*@brief Reads SparseTensors from a "SparseTensorsMap" and concatenates them . \n
*@par Inputs:
* handles: A 1D Tensor of type int64.
*The "N" serialized SparseTensor objects . \n
*@par Attributes:
* @li dtype: A type. The "dtype" of the SparseTensor objects stored in the "SparseTensorsMap".
* @li container: An optional string. Defaults to "".
*The container name for the "SparseTensorsMap" read by this op.
* @li shared_name: An optional string. Defaults to "".
*The shared name for the "SparseTensorsMap" read by this op . \n
*@par Outputs:
* @li indices: A Tensor of type int64. 2-D. The `indices` of the minibatch `SparseTensor`.
* @li values: A Tensor of type "dtype". 1-D. The `values` of the minibatch `SparseTensor`.
* @li shape: A Tensor of type int64. 1-D. The `shape` of the minibatch `SparseTensor`. \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator TakeManySparseFromTensorsMap.
*/
REG_OP(TakeManySparseFromTensorsMap)
    .INPUT(handles, TensorType({DT_INT64}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(TakeManySparseFromTensorsMap)

/**
*@brief Serializes a SparseTensor into a [3] Tensor object . \n
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The indices of the SparseTensor.
* @li values: A 1D Tensor. The values of the SparseTensor.
* @li shape: A 1D Tensor of type int64. The shape of the SparseTensor . \n
*@par Attributes:
* out_type: An optional type. Defaults to "string" . \n
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeSparse.
*/
REG_OP(SerializeSparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING, DT_VARIANT}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeSparse)

/**
*@brief Serializes an "N"-minibatch SparseTensor into an [N, 3] Tensor object . \n
*@par Inputs:
*Three inputs, including:
* @li indices: A 2D Tensor of type int64. The "indices" of the minibatch SparseTensor.
* @li values: A 1D Tensor. The "values" of the minibatch SparseTensor.
* @li shape: A 1D Tensor of type int64. The "shape" of the minibatch SparseTensor . \n
*@par Attributes:
* out_type: An optional type. Defaults to "string" . \n
*@par Outputs:
* serialized_sparse: A Tensor of type "out_type" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SerializeManySparse.
*/
REG_OP(SerializeManySparse)
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .INPUT(shape, TensorType({DT_INT64}))
    .OUTPUT(serialized_sparse, TensorType({DT_STRING, DT_VARIANT}))
    .ATTR(out_type, Type, DT_STRING)
    .OP_END_FACTORY_REG(SerializeManySparse)

/**
*@brief Deserializes SparseTensor objects . \n
*@par Inputs:
*serialized_sparse: A Tensor. The serialized SparseTensor objects.
*The last dimension must have 3 columns . \n
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects . \n
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeSparse.
*/
REG_OP(DeserializeSparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING, DT_VARIANT}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeSparse)

/**
*@brief Deserializes and concatenates SparseTensors from a serialized minibatch . \n
*@par Inputs:
*One input:
* serialized_sparse: A 2D Tensor of type string.
*The "N" serialized SparseTensor objects. Must have 3 columns . \n
*@par Attributes:
* dtype: A required type. The type of the serialized SparseTensor objects . \n
*@par Outputs:
* @li indices: A Tensor of type int64.
* @li values: A Tensor of type "dtype".
* @li shape: A Tensor of type int64 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator DeserializeManySparse.
*/
REG_OP(DeserializeManySparse)
    .INPUT(serialized_sparse, TensorType({DT_STRING}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_BOOL, DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT, DT_FLOAT16, \
        DT_COMPLEX64, DT_COMPLEX128, DT_RESOURCE, DT_STRING}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(DeserializeManySparse)
}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_SPARSE_OPS_H_

The Graph Engine (GE) is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts: the GE API and GE Core.