You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

experiment_ops.h 19 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493
  1. /**
  2. * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file experiment_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
  24. /**
  25. * @brief Updates "var" according to the AdamW algorithm.
  26. *
  27. * @attention Constraints:
  28. * The input tensors must have the same shape.*
  29. *
  30. * @par Inputs:
  31. * @li var: A mutable Tensor of the type TensorType::NumberType().
  32. * Should be from a Variable().
  33. * @li m: A mutable Tensor of the same type as "var".
  34. * Should be from a Variable().
  35. * @li v: A mutable Tensor of the same type as "var".
  36. * Should be from a Variable().
  37. * @li beta1_power: A scalar of the same type as "var".
  38. * @li beta2_power: A scalar of the same type as "var".
  39. * @li lr: learning_rate. A scalar of the same type as "var".
  40. * @li weight_decay: learning_rate. A scalar of the same type as "var".
  41. * @li beta1: A scalar of the same type as "var".
  42. * @li beta2: A scalar of the same type as "var".
  43. * @li epsilon: A scalar of the same type as "var".
  44. * @li grad: A Tensor of the same type as "var", for the gradient.
  45. * @li max_grad_norm: A mutable Tensor of the same type as "var", an optional input.
  46. * Should be from a Variable().
  47. *
  48. * @par Attributes:
  49. * @li amsgrad: An optional bool. Defaults to "False".
  50. * If "True", max_grad_norm input and output must be entered.
  51. * @li maximize: An optional bool. Defaults to "False".
  52. *
  53. * @par Outputs:
  54. * @li var: A mutable tensor. Has the same type as input "var".
  55. * @li m: A mutable tensor. Has the same type as input "m".
  56. * @li v: A mutable tensor. Has the same type as input "v". \n
  57. */
  58. REG_OP(ApplyAdamW)
  59. .INPUT(var, TensorType::NumberType())
  60. .INPUT(m, TensorType::NumberType())
  61. .INPUT(v, TensorType::NumberType())
  62. .INPUT(beta1_power, TensorType::NumberType())
  63. .INPUT(beta2_power, TensorType::NumberType())
  64. .INPUT(lr, TensorType::NumberType())
  65. .INPUT(weight_decay, TensorType::NumberType())
  66. .INPUT(beta1, TensorType::NumberType())
  67. .INPUT(beta2, TensorType::NumberType())
  68. .INPUT(epsilon, TensorType::NumberType())
  69. .INPUT(grad, TensorType::NumberType())
  70. .OPTIONAL_INPUT(max_grad_norm, TensorType::NumberType())
  71. .OUTPUT(var, TensorType::NumberType())
  72. .OUTPUT(m, TensorType::NumberType())
  73. .OUTPUT(v, TensorType::NumberType())
  74. .ATTR(amsgrad, Bool, false)
  75. .ATTR(maximize, Bool, false)
  76. .OP_END_FACTORY_REG(ApplyAdamW)
  77. /**
  78. * @brief Calculate SQ distance. \n
  79. *
  80. * @par Inputs:
  81. * @li ivf: A Tensor, dtype is uint8.
  82. * @li query: A Tensor, dtype is float16 or float32.
  83. * @li bucket_list: A Tensor, dtype is int32 or int64.
  84. * @li bucket_limits: A Tensor, dtype is int32 or int64.
  85. * @li bucket_offsets: A Tensor, dtype is int32 or int64.
  86. * @li vmin: A Tensor, dtype is float16 or float32.
  87. * @li vdiff: A Tensor, dtype is float16 or float32. \n
  88. *
  89. * @par Outputs:
  90. * @li actual_count: A Tensor, dtype is int32 or int64, the actual number of sq_distance.
  91. * @li sq_distance: A Tensor, dtype is float16 or float32.
  92. * @li grouped_extreme_distance: A Tensor, dtype is float16 or float32, the extremum in each group of sq_distance.
  93. * @li sq_ivf: A Tensor, dtype is int32 or int64.
  94. * @li sq_index: A Tensor, dtype is int32 or int64. \n
  95. *
  96. * @par Attributes:
  97. * @li total_limit: A Int, indicates the max length of the output sq_distance.
  98. * @li group_size: A Int, indicates the group size of the extremum.
  99. * @li extreme_mode: A Int, indicates the type of extremum, 0 means minimum, and 1 means maximum. \n
  100. *
  101. */
  102. REG_OP(ScanSQCodes)
  103. .INPUT(ivf, TensorType({DT_UINT8}))
  104. .INPUT(query, TensorType({DT_FLOAT16, DT_FLOAT}))
  105. .INPUT(bucket_list, TensorType({DT_INT32, DT_INT64}))
  106. .INPUT(bucket_limits, TensorType({DT_INT32, DT_INT64}))
  107. .INPUT(bucket_offsets, TensorType({DT_INT32, DT_INT64}))
  108. .INPUT(vmin, TensorType({DT_FLOAT16, DT_FLOAT}))
  109. .INPUT(vdiff, TensorType({DT_FLOAT16, DT_FLOAT}))
  110. .OUTPUT(actual_count, TensorType({DT_INT32, DT_INT64}))
  111. .OUTPUT(sq_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
  112. .OUTPUT(grouped_extreme_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
  113. .OUTPUT(sq_ivf, TensorType({DT_INT32, DT_INT64}))
  114. .OUTPUT(sq_index, TensorType({DT_INT32, DT_INT64}))
  115. .REQUIRED_ATTR(total_limit, Int)
  116. .ATTR(group_size, Int, 64)
  117. .ATTR(extreme_mode, Int, 0)
  118. .OP_END_FACTORY_REG(ScanSQCodes)
  119. /**
  120. * @brief Multiplies matrix "a" by matrix "b", producing "a * b". \n
  121. * @par Inputs:
  122. * Four inputs, including:
  123. * @li x1: A matrix Tensor. Must be one of the following types: float32,
  124. * float16, int32, int8, int4, bf16. 3D. Has format ND.
  125. * @li x2: A matrix Tensor. Must be one of the following types: float32,
  126. * float16, int32, int8, int4, bf16. 3D. Has format ND.
  127. * @li bias: A optional Tensor. Must be one of the following types:
  128. * float32, float16, int32, bf16. 1D. Has format ND.
  129. * @li offset_w: A optional Tensor. Must be one of the following types:
  130. * int8, int4. Has format ND. \n
  131. * @par Attributes:
  132. * Three attributes, including:
  133. * @li perm_x1: A list int. "x1" is permuted to shape [B, M, K] before multiplication.
  134. * @li perm_x2: A list int. "x2" is permuted to shape [B, K, N] before multiplication.
  135. * @li perm_y: A list int. "y" is permuted after multiplication.
  136. * @li offset_x: An optional integer for quantized TransposeBatchMatMul.
  137. * The negative offset added to the input "x1" for int8, int4 type. Ensure offset_x
  138. * within the effective range of input data type. Defaults to "0". \n
  139. * @par Outputs:
  140. * y: The result matrix Tensor. 3D. Must be one of the following
  141. * types: float32, float16, int32, bf16. 3D. Has format ND. \n
  142. */
  143. REG_OP(TransposeBatchMatMul)
  144. .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
  145. .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
  146. .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  147. .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
  148. .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
  149. .ATTR(perm_x1, ListInt, {})
  150. .ATTR(perm_x2, ListInt, {})
  151. .ATTR(perm_y, ListInt, {})
  152. .ATTR(offset_x, Int, 0)
  153. .OP_END_FACTORY_REG(TransposeBatchMatMul)
  154. /**
  155. * @brief Performs non-maximum suppression (NMS) on the rotated boxes according
  156. * to their intersection-over-union (IoU). Rotated NMS interatively removes lower
  157. * scoring rotated boxes which have an IoU greater than iou_threshold with
  158. * another (higher scoring) rotated box.
  159. * @par Inputs:
  160. * Three inputs, including:
  161. * @li boxes: A 2D Tensor of float16 or float32 with shape (N, 5). Rotated boxes to
  162. * perform NMS on. They are expected to be in (x1, y1, x2, y2, angle_degress) format.
  163. * @li scores: A 1D Tensor of float16 or float32 with shape (N). Scores for each one of
  164. * the rotated boxes.
  165. * @li labels: A 1D Tensor of int32 or int64 with shape (N). Labels for each one of
  166. * the rotated boxes.
  167. * @par Attributes:
  168. * iou_threshold: A required float attribute. Discards all overlapping rotated
  169. * boxes with IoU < iou_threshold.
  170. * @par Outputs:
  171. * Two outputs, including:
  172. * @li selected_detections: A 2D Tensor of float16 or float32 with shape (N, 5).
  173. * The selected boxes that kept by Rotated NMS, sorted in decreasing order of scores.
  174. * @li keep_indices: A 1D Tensor of int32 or int64 with shape (N). The indices of
  175. * selected_detections.
  176. * @attention Constraints:
  177. * Currently, the tensor type of input (boxes, scores) only support float.
  178. * The tensor type of keep_indices only support int32.
  179. */
  180. REG_OP(RotatedNMS)
  181. .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
  182. .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  183. .INPUT(labels, TensorType({DT_INT32, DT_INT64}))
  184. .OUTPUT(selected_detections, TensorType({DT_FLOAT16, DT_FLOAT}))
  185. .OUTPUT(keep_indices, TensorType({DT_INT32, DT_INT64}))
  186. .REQUIRED_ATTR(iou_threshold, Float)
  187. .OP_END_FACTORY_REG(RotatedNMS)
  188. /**
  189. * @brief According to the indices, return the value.
  190. * @par Inputs:
  191. * Four inputs, including:
  192. * @li x: A ND Tensor.
  193. * @li indexed_sizes: A 1D Tensor of int64 with shape (N). Sizes for each one of the indexed data.
  194. * @li indexed_strides: A 1D Tensor of int64 with shape (N). Strides for each one of the indexed data.
  195. * @li indices: Dynamic input. A ND Tensor of int64. return the value according to the indices.
  196. * @par Outputs:
  197. * y: The indexed output tensor. Has the same type and format as input "x".
  198. */
  199. REG_OP(Index)
  200. .INPUT(x, TensorType::BasicType())
  201. .INPUT(indexed_sizes, TensorType({DT_INT64}))
  202. .INPUT(indexed_strides, TensorType({DT_INT64}))
  203. .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
  204. .OUTPUT(y, TensorType::BasicType())
  205. .OP_END_FACTORY_REG(Index)
  206. /**
  207. * @brief According to the index number of indexes, replace the value
  208. * corresponding to X with the value.
  209. * @par Inputs:
  210. * Five inputs, including:
  211. * @li x: A ND Tensor.
  212. * @li value: A Tensor of the same type as "x".
  213. * @li indexed_sizes: A 1D Tensor of int64 with shape (N). Sizes for each one of the indexed data.
  214. * @li indexed_strides: A 1D Tensor of int64 with shape (N). Strides for each one of the indexed data.
  215. * @li indices: Dynamic input. A Tensor of the indices.
  216. * @par Attributes:
  217. * @li accumulate: Does it support self accumulation. Defaults to false.
  218. * @par Outputs:
  219. * @li x: A Tensor.
  220. * @par Third-party framework compatibility
  221. * Compatible with the Pytorch operator index_put.
  222. * @par Restrictions:
  223. * Warning:THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  224. */
  225. REG_OP(IndexPutV2)
  226. .INPUT(x, TensorType::BasicType())
  227. .INPUT(value, TensorType::BasicType())
  228. .INPUT(indexed_sizes, TensorType({DT_INT64}))
  229. .INPUT(indexed_strides, TensorType({DT_INT64}))
  230. .DYNAMIC_INPUT(indices, TensorType({DT_INT64}))
  231. .OUTPUT(x, TensorType::BasicType())
  232. .ATTR(accumulate, Bool, false)
  233. .OP_END_FACTORY_REG(IndexPutV2)
  234. /**
  235. * @brief Performs average pooling on the input. Used in the combination of conv + avgpoolupdate to replace avgpool
  236. * @par Inputs:
  237. * x1: Output of upstream Conv2d. A tensor of type float16, float32.
  238. * x2: Input feature map of upstream Conv2d. A tensor of type int8, float16, float32.
  239. * @par Attributes:
  240. * @li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window,
  241. * where N = C = 1, and H and W are positive integers within the range [1, 255].
  242. * @li strides: A required list of 4 ints, specifying the stride of the sliding window.
  243. * The strides of the N and C dimensions are 1.
  244. * The strides of the H and W dimensions are positive integers within the range [1, 63].
  245. * @li padding_mode: A required string, specifying the padding algorithm,
  246. * either "VALID", "SAME" and "CALCULATED".
  247. * With "SAME" means that the outputs will have the same spatial dimensions as its inputs.
  248. * With "VALID" means no padding.
  249. * @li pads: Pad value when padding_mode is "CALCULATED".
  250. * @li data_format: An optional string, specifying the data format of "ksize" and "strides",
  251. * either "NCHW", or "NHWC" (default).
  252. * @li ceil_mode: Use ceil or floor to calculate the output size when padding_mode is "CALCULATED".
  253. * @li exclusive: Ignore padding area or not when calculating average.
  254. * @par Outputs:
  255. * y: The average pooled output tensor. Has the same type and format as input "x1".
  256. * @attention Constraints:
  257. * @li Only single input and single output are supported.
  258. * @li "ksize_H" and "ksize_W" are positive integers within the range [1, 255]. ksize_H * ksize_W < 256
  259. * @li Due to instruction restrictions,
  260. * the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
  261. * @par Third-party framework compatibility
  262. * Compatible with the TensorFlow/Pytorch/Onnx operator AvgPoolV2.
  263. */
  264. REG_OP(AvgPoolUpdate)
  265. .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
  266. .INPUT(x2, TensorType({DA_INT4, DT_INT8, DT_FLOAT16, DT_FLOAT}))
  267. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
  268. .REQUIRED_ATTR(ksize, ListInt)
  269. .REQUIRED_ATTR(strides, ListInt)
  270. .ATTR(padding_mode, String, "CALCULATED")
  271. .ATTR(pads, ListInt, {0, 0, 0, 0})
  272. .ATTR(data_format, String, "NHWC")
  273. .ATTR(ceil_mode, Bool, false)
  274. .ATTR(exclusive, Bool, true)
  275. .OP_END_FACTORY_REG(AvgPoolUpdate)
  276. /**
  277. * @brief batch input by time
  278. * @par Inputs:
  279. * x: A list of input tensors. It's a dynamic input
  280. * @par Attributes:
  281. * @li window: time window, [-1, int64_max], if -1 will batch by input data flag,
  282. * else will batch by input timestamp and data flag.
  283. * @li batch_dim: [-1, input_shape_range), if -1 input shape:[x, ..., x] ---> output shape:[-1, x, ..., x],
  284. * else output shape:[x, ..., -1(batch_dim), ..., x];
  285. * @li drop_remainder: a bool flag, take effect when window > -1,
  286. * if true when batch data window < window, will drop data.
  287. * @par Outputs:
  288. * y: A list of output tensors. It's a dynamic input, the same size as "x".
  289. * @attention Constraints:
  290. * @li Only support in helper udf
  291. */
  292. REG_OP(TimeBatch)
  293. .DYNAMIC_INPUT(x, TensorType::RealNumberType())
  294. .DYNAMIC_OUTPUT(y, TensorType::RealNumberType())
  295. .REQUIRED_ATTR(window, Int)
  296. .ATTR(batch_dim, Int, -1)
  297. .ATTR(drop_remainder, Bool, false)
  298. .OP_END_FACTORY_REG(TimeBatch)
  299. /**
  300. * @brief Auto Batch process. \n
  301. * @par Inputs:
  302. * @li x: A list of input tensor objects. It's a dynamic input. \n
  303. * @par Outputs:
  304. * @li y: A list of output tensor objects. It's a dynamic output. \n
  305. * @par Attributes:
  306. * @li batch_size: auto batch size.
  307. * @li timeout: auto batch wait timeout(unit:ms).
  308. * @li padding: weather to pad when batch is insufficient.
  309. * @li slide_stride: sliding window step.
  310. */
  311. REG_OP(AutoBatch)
  312. .DYNAMIC_INPUT(x, TensorType::RealNumberType())
  313. .DYNAMIC_OUTPUT(y, TensorType::RealNumberType())
  314. .REQUIRED_ATTR(batch_size, Int)
  315. .ATTR(timeout, Int, 0)
  316. .ATTR(padding, Bool, false)
  317. .ATTR(slide_stride, Int, 0)
  318. .OP_END_FACTORY_REG(AutoBatch)
  319. /**
  320. * @brief YUVToRGB
  321. * @par Inputs:
  322. * @li x: A 4-D uint8 Tensor.
  323. * Must set the format, supported format list ["NYUV"].
  324. * @li matrix: A 1-D float tensor of 2x3x3 elements
  325. * @par Outputs:
  326. * @li y: A 4-D uint8 Tensor.
  327. * Must set the format, supported format list ["NCHW, NHWC"].
  328. * @par Attributes:
  329. * @li matrix_type: An Int attr, Defaults to 0.
  330. * support list [ 0: CSC_MATRIX_BT601_WIDE,
  331. * 1: CSC_MATRIX_BT601_NARROW,
  332. * 2: CSC_MATRIX_BT709_WIDE,
  333. * 3: CSC_MATRIX_BT709_NARROW,
  334. * 4: CSC_MATRIX_BT2020_WIDE,
  335. * 5: CSC_MATRIX_BT2020_NARROW,
  336. * 6: CSC_MATRIX_USR_DEFINE ]
  337. * @li rb_swap: An Int attr, Defaults to 0.
  338. * support list [ 0: RGB, 1: BGR ]
  339. * @attention Constraints:
  340. * @li Only support in dvpp
  341. */
  342. REG_OP(YUVToRGB)
  343. .INPUT(x, TensorType({DT_UINT8}))
  344. .OPTIONAL_INPUT(matrix, TensorType({DT_FLOAT}))
  345. .OUTPUT(y, TensorType({DT_UINT8}))
  346. .ATTR(matrix_type, Int, 0)
  347. .ATTR(rb_swap, Int, 0)
  348. .OP_END_FACTORY_REG(YUVToRGB)
  349. /**
  350. * @brief DecodeJpegPre
  351. * @par Inputs:
  352. * @li contents: A Tensor of type string. 0-D. The JPEG-encoded image.
  353. * @par Outputs:
  354. * @li dvpp_support: indicates if the dvpp support this jpeg image decode.
  355. * @par Attributes:
  356. * @li w_range: An required listInt contains width [min, max].
  357. * @li h_range: An required listInt contains height [min, max].
  358. * @attention Constraints:
  359. * @li Only support in dvpp
  360. */
  361. REG_OP(DecodeJpegPre)
  362. .INPUT(contents, TensorType({DT_STRING}))
  363. .OUTPUT(dvpp_support, BOOL)
  364. .REQUIRED_ATTR(w_range, ListInt)
  365. .REQUIRED_ATTR(h_range, ListInt)
  366. .OP_END_FACTORY_REG(DecodeJpegPre)
  367. /**
  368. * @brief init PartitionMap table. \n
  369. * @par Inputs:
  370. * @li ps_num: A Tensor, dtype is uint32. 0-D. indicates ps number.
  371. * @li ps_ids: A Tensor, dtype is uint32. 1-D. indicates the id of ps. \n
  372. * @par Attributes:
  373. * @li partition_num: A Int, indicates the number of partition. \n
  374. */
  375. REG_OP(InitPartitionMap)
  376. .INPUT(ps_num, TensorType({DT_UINT32}))
  377. .INPUT(ps_ids, TensorType({DT_UINT32}))
  378. .ATTR(partition_num, Int, 65537)
  379. .OP_END_FACTORY_REG(InitPartitionMap)
  380. /**
  381. * @brief uninit PartitionMap table. \n
  382. */
  383. REG_OP(UninitPartitionMap)
  384. .OP_END_FACTORY_REG(UninitPartitionMap)
  385. /**
  386. * @brief init Embedding hashtable. \n
  387. * @par Inputs:
  388. * @li table_id: A Tensor, dtype is uint32. 0-D. indicates the id of hashtable. \n
  389. * @par Attributes:
  390. * @li bucket_size: A Int. \n
  391. */
  392. REG_OP(InitEmbeddingHashmap)
  393. .INPUT(table_id, TensorType({DT_UINT32}))
  394. .ATTR(bucket_size, Int, 0)
  395. .OP_END_FACTORY_REG(InitEmbeddingHashmap)
  396. /**
  397. * @brief embedding hsahtable data import. \n
  398. * @par Inputs:
  399. * @li file_path: A Tensor, dtype is string. 0-D. indicates embedding filepath.
  400. * @li file_name: A Tensor, dtype is string. 0-D. indicates embedding filename.
  401. * @li ps_id: A Tensor, dtype is uint32. 0-D. indicates the id of ps.
  402. * @li table_id: A Tensor, dtype is uint32. 0-D. indicates the id of hashtable.
  403. * @li embedding_dim: A Tensor, dtype is uint32. 0-D. indicates the hashtable value number. \n
  404. */
  405. REG_OP(EmbeddingTableImport)
  406. .INPUT(file_path, TensorType({DT_STRING}))
  407. .INPUT(file_name, TensorType({DT_STRING}))
  408. .INPUT(ps_id, TensorType({DT_UINT32}))
  409. .INPUT(table_id, TensorType({DT_UINT32}))
  410. .INPUT(embedding_dim, TensorType({DT_UINT32}))
  411. .OP_END_FACTORY_REG(EmbeddingTableImport)
  412. /**
  413. * @brief embedding hsahtable data lookup. \n
  414. * @par Inputs:
  415. * @li table_id: A Tensor, dtype is uint32. 0-D. indicates the id of hashtable.
  416. * @li keys: A Tensor, dtype is uint32. 1-D. indicates the hashtable key. \n
  417. * @par Outputs:
  418. * @li values: indicates the hashtable value. \n
  419. */
  420. REG_OP(EmbeddingTableFind)
  421. .INPUT(table_id, TensorType({DT_UINT32}))
  422. .INPUT(keys, TensorType({DT_UINT64}))
  423. .OUTPUT(values, TensorType({DT_FLOAT}))
  424. .OP_END_FACTORY_REG(EmbeddingTableFind)
  425. /**
  426. * @brief uninit embedding hsahtable. \n
  427. * @par Inputs:
  428. * @li table_id: A Tensor, dtype is uint32. 0-D. indicates the id of hashtable. \n
  429. */
  430. REG_OP(UninitEmbeddingHashmap)
  431. .INPUT(table_id, TensorType({DT_UINT32}))
  432. .OP_END_FACTORY_REG(UninitEmbeddingHashmap)
  433. } // namespace ge
  434. #endif // OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示