You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

data_flow_ops.h 23 kB

5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef GE_OP_DATA_FLOW_OPS_H_
  17. #define GE_OP_DATA_FLOW_OPS_H_
  18. #include <algorithm>
  19. #include "graph/operator_reg.h"
  20. namespace ge {
// Checks whether the queue referenced by "handle" has been closed.
REG_OP(QueueIsClosed)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(is_closed, TensorType({DT_BOOL}))
    .OP_END_FACTORY_REG(QueueIsClosed)

// Returns the number of elements currently in the queue referenced by "handle".
REG_OP(QueueSize)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(size, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(QueueSize)

// Creates a first-in-first-out queue holding tuples of "component_types".
// capacity of -1 presumably means unbounded -- TODO confirm against runtime.
REG_OP(FIFOQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(FIFOQueue)

// Enqueues one tuple of component tensors into the queue.
REG_OP(QueueEnqueue)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_INPUT(components, TensorType({DT_INT8, DT_UINT8, \
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, \
        DT_UINT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
    .ATTR(timeout_ms, Int, -1)
    .OP_END_FACTORY_REG(QueueEnqueue)

// Enqueues zero or more tuples at once (components sliced along dim 0).
// NOTE(review): slicing semantics assumed to mirror TF QueueEnqueueMany -- confirm.
REG_OP(QueueEnqueueMany)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_INPUT(components, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
    .ATTR(timeout_ms, Int, -1)
    .OP_END_FACTORY_REG(QueueEnqueueMany)

// Dequeues one tuple of component tensors from the queue.
REG_OP(QueueDequeue)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_INT8, DT_UINT8, DT_INT16, \
        DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeue)

// Dequeues exactly "n" tuples from the queue.
REG_OP(QueueDequeueMany)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(n, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_INT8, DT_UINT8, \
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, DT_UINT64, \
        DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeueMany)

// Dequeues up to "n" tuples (may return fewer, e.g. when the queue closes).
REG_OP(QueueDequeueUpTo)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(n, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(components, TensorType({DT_INT8, DT_UINT8, \
        DT_INT16, DT_UINT16, DT_INT32, DT_INT64, DT_UINT32, \
        DT_UINT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BOOL}))
    .ATTR(timeout_ms, Int, -1)
    .REQUIRED_ATTR(component_types, ListType)
    .OP_END_FACTORY_REG(QueueDequeueUpTo)
// Inserts a tuple of values into the staging area identified by
// container/shared_name.
REG_OP(Stage)
    .DYNAMIC_INPUT(values, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(Stage)

// Removes all elements from the matching staging area.
REG_OP(StageClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StageClear)

// Returns the staged tuple at position "index" without removing it.
REG_OP(StagePeek)
    .INPUT(index, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StagePeek)

// Returns the number of elements in the matching staging area.
REG_OP(StageSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(dtypes, ListType, {})
    .OP_END_FACTORY_REG(StageSize)
  112. REG_OP(StackPop)
  113. .INPUT(handle, TensorType({DT_RESOURCE}))
  114. .OUTPUT(element, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
  115. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
  116. DT_DOUBLE, DT_UINT32, DT_UNIT64}))
  117. .REQUIRED_ATTR(elem_type, Type)
  118. .OP_END_FACTORY_REG(StackPop)
  119. REG_OP(StackPush)
  120. .INPUT(handle, TensorType({DT_RESOURCE}))
  121. .INPUT(element, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
  122. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
  123. DT_DOUBLE, DT_UINT32, DT_UNIT64}))
  124. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
  125. DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
  126. DT_DOUBLE, DT_UINT32, DT_UNIT64}))
  127. .ATTR(swap_memory, Bool, false)
  128. .OP_END_FACTORY_REG(StackPush)
// Closes the stack referenced by "handle" and releases its resources.
REG_OP(StackClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OP_END_FACTORY_REG(StackClose)

// Creates a stack resource holding at most "max_size" elements of "elem_type".
REG_OP(Stack)
    .INPUT(max_size, TensorType({DT_INT32}))
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(stack_name, String, "")
    .REQUIRED_ATTR(elem_type, Type)
    .OP_END_FACTORY_REG(Stack)
// Partitions "x" into "num_partitions" outputs according to the int32
// "partitions" index tensor.
REG_OP(DynamicPartition)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(partitions, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(num_partitions, Int, 1)
    .OP_END_FACTORY_REG(DynamicPartition)

// Interleaves the N input tensors "x" into a single tensor, placing values
// at the positions given by the matching "indices" tensors.
REG_OP(DynamicStitch)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(DynamicStitch)

// Same contract as DynamicStitch; the parallel variant presumably allows the
// merge to run concurrently -- TODO confirm against kernel implementation.
REG_OP(ParallelDynamicStitch)
    .DYNAMIC_INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(N, Int, 1)
    .OP_END_FACTORY_REG(ParallelDynamicStitch)
// Removes all elements from the matching hash-map staging area.
REG_OP(MapClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapClear)

// Returns the number of incomplete elements in the matching map.
REG_OP(MapIncompleteSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapIncompleteSize)

// Removes and returns one tuple from the matching staging area.
REG_OP(Unstage)
    .DYNAMIC_OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, \
        DT_DOUBLE, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .REQUIRED_ATTR(dtypes, ListType)
    .OP_END_FACTORY_REG(Unstage)

// Inserts (key, indices) -> values into the underlying map container.
REG_OP(MapStage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapStage)

// Removes and returns the values associated with "key" from the map.
REG_OP(MapUnstage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapUnstage)

// Removes and returns an arbitrary (key, values) pair from the map.
REG_OP(MapUnstageNoKey)
    .INPUT(indices, TensorType({DT_INT32}))
    .OUTPUT(key, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapUnstageNoKey)

// Returns the values associated with "key" without removing them.
REG_OP(MapPeek)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values,
        TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
            DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
            DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapPeek)

// Returns the number of complete elements in the matching map.
REG_OP(MapSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(MapSize)
// Creates a TensorArray of "size" elements; the "flow" scalar serializes
// reads/writes into a dataflow ordering.
REG_OP(TensorArray)
    .INPUT(size, TensorType({DT_INT32}))
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .OUTPUT(flow, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE)
    .ATTR(dynamic_size, Bool, false)
    .ATTR(clear_after_read, Bool, true)
    .ATTR(identical_element_shapes, Bool, false)
    .ATTR(tensor_array_name, String, "")
    .OP_END_FACTORY_REG(TensorArray)

// Deletes the TensorArray referenced by "handle".
REG_OP(TensorArrayClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .OP_END_FACTORY_REG(TensorArrayClose)

// Concatenates all elements along dim 0; "lengths" records each element's
// original first-dimension size.
REG_OP(TensorArrayConcat)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL}))
    .OUTPUT(lengths, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape_except0, ListInt, ge::UNKNOWN_SHAPE)
    .OP_END_FACTORY_REG(TensorArrayConcat)

// Gathers the elements at "indices" into a single stacked tensor.
REG_OP(TensorArrayGather)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(indices, TensorType({DT_INT32}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(element_shape, ListInt, ge::UNKNOWN_SHAPE)
    .OP_END_FACTORY_REG(TensorArrayGather)

// Creates (or looks up) the gradient TensorArray for "handle", keyed by "source".
REG_OP(TensorArrayGrad)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(grad_handle, TensorType({DT_RESOURCE}))
    .OUTPUT(flow_out, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(source, String)
    .OP_END_FACTORY_REG(TensorArrayGrad)

// Writes "value" at position "index" of the TensorArray.
REG_OP(TensorArrayWrite)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .INPUT(index, TensorType({DT_INT32}))
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, \
        DT_INT16, DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL}))
    .INPUT(flow_in, TensorType({DT_FLOAT}))
    .OUTPUT(flow_out, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(TensorArrayWrite)

// Gradient-array variant that prepends "shape_to_prepend" to element shapes.
REG_OP(TensorArrayGradWithShape)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .INPUT(shape_to_prepend, TensorType({ DT_INT32 }))
    .OUTPUT(grad_handle, TensorType({ DT_RESOURCE }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .ATTR(source, String, "")
    .OP_END_FACTORY_REG(TensorArrayGradWithShape)

// Reads the element at position "index" from the TensorArray.
REG_OP(TensorArrayRead)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(index, TensorType({ DT_INT32 }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE }))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(TensorArrayRead)

// Scatters slices of "value" into the TensorArray at the given "indices".
REG_OP(TensorArrayScatter)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(indices, TensorType({ DT_INT32 }))
    .INPUT(value, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .OP_END_FACTORY_REG(TensorArrayScatter)

// Splits "value" along dim 0 into pieces of the given "lengths" and stores
// each piece as one TensorArray element.
REG_OP(TensorArraySplit)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(value, TensorType({ DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, \
        DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE }))
    .INPUT(lengths, TensorType({ DT_INT64 }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(flow_out, TensorType({ DT_FLOAT }))
    .OP_END_FACTORY_REG(TensorArraySplit)

// Returns the current number of elements in the TensorArray.
REG_OP(TensorArraySize)
    .INPUT(handle, TensorType({ DT_RESOURCE }))
    .INPUT(flow_in, TensorType({ DT_FLOAT }))
    .OUTPUT(size, TensorType({ DT_INT32 }))
    .OP_END_FACTORY_REG(TensorArraySize)
// Creates a queue that dequeues elements in random order; "seed"/"seed2"
// control the shuffle, "min_after_dequeue" the minimum retained backlog.
REG_OP(RandomShuffleQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(min_after_dequeue, Int, 0)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(RandomShuffleQueue)

// FIFO queue variant that pads variable-size components on dequeue-many.
REG_OP(PaddingFIFOQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .REQUIRED_ATTR(component_types, ListType)
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(PaddingFIFOQueue)

// Creates a queue that yields elements sorted by the first component.
// NOTE(review): priority ordering assumed from the op name -- confirm.
REG_OP(PriorityQueue)
    .OUTPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(component_types, ListType, {})
    .ATTR(shapes, ListListInt, {})
    .ATTR(capacity, Int, -1)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(PriorityQueue)

// Closes the queue; "cancel_pending_enqueues" aborts blocked producers.
REG_OP(QueueClose)
    .INPUT(handle, TensorType({DT_RESOURCE}))
    .ATTR(cancel_pending_enqueues, Bool, false)
    .OP_END_FACTORY_REG(QueueClose)
// Inserts (key, indices) -> values into the underlying ordered-map container.
REG_OP(OrderedMapStage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
        DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16,
        DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapStage)

// Returns the number of complete elements in the matching ordered map.
REG_OP(OrderedMapSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapSize)

// Removes all elements from the matching ordered map.
REG_OP(OrderedMapClear)
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapClear)

// Returns the number of incomplete elements in the matching ordered map.
REG_OP(OrderedMapIncompleteSize)
    .OUTPUT(size, TensorType({DT_INT32}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapIncompleteSize)

// Returns the values for "key" without removing them from the ordered map.
REG_OP(OrderedMapPeek)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
        DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16,
        DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapPeek)

// Removes and returns the (key, values) pair with the smallest key.
// NOTE(review): smallest-key semantics inferred from the ordered-map family
// name -- confirm against kernel implementation.
REG_OP(OrderedMapUnstageNoKey)
    .INPUT(indices, TensorType({DT_INT32}))
    .OUTPUT(key, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
        DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16,
        DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapUnstageNoKey)

// Removes and returns the values associated with "key".
REG_OP(OrderedMapUnstage)
    .INPUT(key, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT32}))
    .DYNAMIC_OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
        DT_INT32, DT_INT64, DT_FLOAT, DT_FLOAT16,
        DT_DOUBLE, DT_BOOL, DT_UINT32, DT_UINT64}))
    .ATTR(capacity, Int, 0)
    .ATTR(memory_limit, Int, 0)
    .ATTR(dtypes, ListType, {})
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .OP_END_FACTORY_REG(OrderedMapUnstage)
  433. REG_OP(Barrier)
  434. .OUTPUT(handle, TensorType({DT_STRING_REF}))
  435. .REQUIRED_ATTR(component_types, ListType)
  436. .ATTR(shapes, ListListInt, {})
  437. .ATTR(capacity, Int, -1)
  438. .ATTR(container, String, "")
  439. .ATTR(shared_name, String, "")
  440. .OP_END_FACTORY_REG(Barrier)
  441. REG_OP(BarrierInsertMany)
  442. .INPUT(handle, TensorType({DT_STRING_REF}))
  443. .INPUT(keys, TensorType({DT_STRING}))
  444. .INPUT(values,
  445. TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  446. DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  447. DT_UINT32, DT_UINT64}))
  448. .REQUIRED_ATTR(component_index, Int)
  449. .OP_END_FACTORY_REG(BarrierInsertMany)
  450. REG_OP(BarrierTakeMany)
  451. .INPUT(handle, TensorType({DT_STRING_REF}))
  452. .INPUT(num_elements, TensorType(DT_INT32))
  453. .OUTPUT(indices, TensorType({DT_INT64}))
  454. .OUTPUT(keys, TensorType({DT_STRING}))
  455. .DYNAMIC_OUTPUT(values,
  456. TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16, \
  457. DT_UINT8, DT_INT32, DT_INT64, DT_BOOL, DT_DOUBLE, \
  458. DT_UINT32, DT_UINT64}))
  459. .REQUIRED_ATTR(component_types, ListType)
  460. .ATTR(allow_small_batch, Bool, false)
  461. .ATTR(wait_for_incomplete, Bool, false)
  462. .ATTR(timeout_ms, Int, -1)
  463. .OP_END_FACTORY_REG(BarrierTakeMany)
  464. REG_OP(BarrierClose)
  465. .INPUT(handle, TensorType({DT_STRING_REF}))
  466. .ATTR(cancel_pending_enqueues, Bool, false)
  467. .OP_END_FACTORY_REG(BarrierClose)
  468. REG_OP(BarrierReadySize)
  469. .INPUT(handle, TensorType({DT_STRING_REF}))
  470. .OUTPUT(size, TensorType(DT_INT32))
  471. .OP_END_FACTORY_REG(BarrierReadySize)
  472. REG_OP(BarrierIncompleteSize)
  473. .INPUT(handle, TensorType({DT_STRING_REF}))
  474. .OUTPUT(size, TensorType(DT_INT32))
  475. .OP_END_FACTORY_REG(BarrierIncompleteSize)
// Emits randomized record strings read from files matching "file_pattern",
// in batches of "batch_size".
REG_OP(RecordInput)
    .OUTPUT(records, TensorType({DT_STRING}))
    .REQUIRED_ATTR(file_pattern, String)
    .ATTR(file_random_seed, Int, 301)
    .ATTR(file_shuffle_shift_ratio, Float, 0)
    .ATTR(file_buffer_size, Int, 10000)
    .ATTR(file_parallelism, Int, 16)
    .ATTR(batch_size, Int, 32)
    .ATTR(compression_type, String, "")
    .OP_END_FACTORY_REG(RecordInput)

// Creates a conditional accumulator for aggregating gradients of the given
// dtype/shape; "reduction_type" defaults to "MEAN".
REG_OP(ConditionalAccumulator)
    .OUTPUT(handle, TensorType({DT_STRING_REF}))
    .REQUIRED_ATTR(dtype, Type)
    .REQUIRED_ATTR(shape, ListInt)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(reduction_type, String, "MEAN")
    .OP_END_FACTORY_REG(ConditionalAccumulator)

// Applies one gradient to the accumulator, tagged with its "local_step".
REG_OP(AccumulatorApplyGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(local_step, TensorType({DT_INT64}))
    .INPUT(gradient, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(AccumulatorApplyGradient)

// Returns how many gradients have been aggregated so far.
REG_OP(AccumulatorNumAccumulated)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(AccumulatorNumAccumulated)

// Updates the accumulator's global step counter.
REG_OP(AccumulatorSetGlobalStep)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(new_global_step, TensorType({DT_INT64}))
    .OP_END_FACTORY_REG(AccumulatorSetGlobalStep)

// Extracts the aggregated gradient once at least "num_required" have arrived.
REG_OP(AccumulatorTakeGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(num_required, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, \
        DT_INT64, DT_DOUBLE, DT_FLOAT}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(AccumulatorTakeGradient)

// Sparse-gradient variant of ConditionalAccumulator.
REG_OP(SparseConditionalAccumulator)
    .OUTPUT(handle, TensorType({DT_STRING_REF}))
    .REQUIRED_ATTR(shape, ListInt)
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(container, String, "")
    .ATTR(shared_name, String, "")
    .ATTR(reduction_type, String, "MEAN")
    .OP_END_FACTORY_REG(SparseConditionalAccumulator)

// Applies a sparse gradient (indices/values/shape triple) to the accumulator.
REG_OP(SparseAccumulatorApplyGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(local_step, TensorType({DT_INT64}))
    .INPUT(indices, TensorType({DT_INT64}))
    .INPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT}))
    .INPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(has_known_shape, Bool)
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(SparseAccumulatorApplyGradient)

// Extracts the aggregated sparse gradient as an indices/values/shape triple.
REG_OP(SparseAccumulatorTakeGradient)
    .INPUT(handle, TensorType({DT_STRING_REF}))
    .INPUT(num_required, TensorType({DT_INT32}))
    .OUTPUT(indices, TensorType({DT_INT64}))
    .OUTPUT(values, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
        DT_INT32, DT_INT64, DT_DOUBLE, DT_FLOAT}))
    .OUTPUT(shape, TensorType({DT_INT64}))
    .REQUIRED_ATTR(dtype, Type)
    .OP_END_FACTORY_REG(SparseAccumulatorTakeGradient)
  543. } // namespace ge
  544. #endif // GE_OP_DATA_FLOW_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用,而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。