
experiment_ops.h

/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file experiment_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
 * @brief Updates "var" according to the AdamW algorithm.
 *
 * @attention Constraints:
 * The input tensors must have the same shape.
 *
 * @par Inputs:
 * @li var: A mutable Tensor of the type TensorType::NumberType().
 * Should be from a Variable().
 * @li m: A mutable Tensor of the same type as "var".
 * Should be from a Variable().
 * @li v: A mutable Tensor of the same type as "var".
 * Should be from a Variable().
 * @li beta1_power: A scalar of the same type as "var".
 * @li beta2_power: A scalar of the same type as "var".
 * @li lr: The learning rate. A scalar of the same type as "var".
 * @li weight_decay: The weight decay. A scalar of the same type as "var".
 * @li beta1: A scalar of the same type as "var".
 * @li beta2: A scalar of the same type as "var".
 * @li epsilon: A scalar of the same type as "var".
 * @li grad: A Tensor of the same type as "var", for the gradient.
 * @li max_grad_norm: A mutable Tensor of the same type as "var", an optional input.
 * Should be from a Variable().
 *
 * @par Attributes:
 * @li amsgrad: An optional bool. Defaults to "False".
 * If "True", the "max_grad_norm" input must be provided.
 * @li maximize: An optional bool. Defaults to "False".
 *
 * @par Outputs:
 * @li var: A mutable tensor. Has the same type as input "var".
 * @li m: A mutable tensor. Has the same type as input "m".
 * @li v: A mutable tensor. Has the same type as input "v". \n
 */
REG_OP(ApplyAdamW)
    .INPUT(var, TensorType::NumberType())
    .INPUT(m, TensorType::NumberType())
    .INPUT(v, TensorType::NumberType())
    .INPUT(beta1_power, TensorType::NumberType())
    .INPUT(beta2_power, TensorType::NumberType())
    .INPUT(lr, TensorType::NumberType())
    .INPUT(weight_decay, TensorType::NumberType())
    .INPUT(beta1, TensorType::NumberType())
    .INPUT(beta2, TensorType::NumberType())
    .INPUT(epsilon, TensorType::NumberType())
    .INPUT(grad, TensorType::NumberType())
    .OPTIONAL_INPUT(max_grad_norm, TensorType::NumberType())
    .OUTPUT(var, TensorType::NumberType())
    .OUTPUT(m, TensorType::NumberType())
    .OUTPUT(v, TensorType::NumberType())
    .ATTR(amsgrad, Bool, false)
    .ATTR(maximize, Bool, false)
    .OP_END_FACTORY_REG(ApplyAdamW)
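
// The routine below is a minimal host-side sketch of the decoupled-weight-decay (AdamW)
// update that the documentation above describes. The exact on-device formula, the name
// ApplyAdamWReference, and the handling of "maximize"/"amsgrad" (running maximum of the
// second moment kept in max_grad_norm) are assumptions based on the standard algorithm,
// not the kernel implementation.
#include <algorithm>
#include <cmath>
#include <vector>

inline void ApplyAdamWReference(std::vector<float> &var, std::vector<float> &m, std::vector<float> &v,
                                std::vector<float> &max_grad_norm, const std::vector<float> &grad,
                                float beta1_power, float beta2_power, float lr, float weight_decay,
                                float beta1, float beta2, float epsilon, bool amsgrad, bool maximize) {
  for (size_t i = 0; i < var.size(); ++i) {
    float g = maximize ? -grad[i] : grad[i];
    var[i] -= lr * weight_decay * var[i];            // decoupled weight decay
    m[i] = beta1 * m[i] + (1.0f - beta1) * g;        // first moment estimate
    v[i] = beta2 * v[i] + (1.0f - beta2) * g * g;    // second moment estimate
    float m_hat = m[i] / (1.0f - beta1_power);       // bias correction
    float v_hat = v[i] / (1.0f - beta2_power);
    if (amsgrad) {
      // Assumed AMSGrad variant: track the running maximum of the second moment.
      max_grad_norm[i] = std::max(max_grad_norm[i], v[i]);
      v_hat = max_grad_norm[i] / (1.0f - beta2_power);
    }
    var[i] -= lr * m_hat / (std::sqrt(v_hat) + epsilon);
  }
}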

/**
 * @brief Calculates SQ distance. \n
 *
 * @par Inputs:
 * @li ivf: A Tensor, dtype is uint8.
 * @li query: A Tensor, dtype is float16 or float32.
 * @li bucket_list: A Tensor, dtype is int32 or int64.
 * @li bucket_limits: A Tensor, dtype is int32 or int64.
 * @li bucket_offsets: A Tensor, dtype is int32 or int64.
 * @li vmin: A Tensor, dtype is float16 or float32.
 * @li vdiff: A Tensor, dtype is float16 or float32. \n
 *
 * @par Outputs:
 * @li actual_count: A Tensor, dtype is int32 or int64, the actual number of sq_distance entries.
 * @li sq_distance: A Tensor, dtype is float16 or float32.
 * @li grouped_extreme_distance: A Tensor, dtype is float16 or float32, the extremum in each group of sq_distance.
 * @li sq_ivf: A Tensor, dtype is int32 or int64.
 * @li sq_index: A Tensor, dtype is int32 or int64. \n
 *
 * @par Attributes:
 * @li total_limit: An Int, indicating the maximum length of the output sq_distance.
 * @li group_size: An Int, indicating the group size of the extremum.
 * @li extreme_mode: An Int, indicating the type of extremum: 0 means minimum and 1 means maximum. \n
 */
REG_OP(ScanSQCodes)
    .INPUT(ivf, TensorType({DT_UINT8}))
    .INPUT(query, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(bucket_list, TensorType({DT_INT32, DT_INT64}))
    .INPUT(bucket_limits, TensorType({DT_INT32, DT_INT64}))
    .INPUT(bucket_offsets, TensorType({DT_INT32, DT_INT64}))
    .INPUT(vmin, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(vdiff, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(actual_count, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(sq_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(grouped_extreme_distance, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(sq_ivf, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(sq_index, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(total_limit, Int)
    .ATTR(group_size, Int, 64)
    .ATTR(extreme_mode, Int, 0)
    .OP_END_FACTORY_REG(ScanSQCodes)
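
// For orientation only: "SQ" is read here as scalar quantization (as used by IVF-SQ style
// indices), where each uint8 code is mapped back into [vmin, vmin + vdiff] per dimension
// before being compared against the query. The de-quantization formula, the squared-L2
// metric, and the helper name SqDistanceSketch are assumptions, not the device kernel.
#include <cstdint>
#include <vector>

inline float SqDistanceSketch(const std::vector<uint8_t> &code, const std::vector<float> &query,
                              const std::vector<float> &vmin, const std::vector<float> &vdiff) {
  float dist = 0.0f;
  for (size_t d = 0; d < code.size(); ++d) {
    // De-quantize the 8-bit code into the per-dimension range [vmin, vmin + vdiff].
    float value = vmin[d] + (static_cast<float>(code[d]) + 0.5f) / 256.0f * vdiff[d];
    float diff = value - query[d];
    dist += diff * diff;  // squared L2 distance accumulated over dimensions
  }
  return dist;
}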

/**
 * @brief Multiplies matrix "a" by matrix "b", producing "a * b". \n
 * @par Inputs:
 * Four inputs, including:
 * @li x1: A matrix Tensor. Must be one of the following types: float32,
 * float16, int32, int8, int4, bf16. 3D. Has format ND.
 * @li x2: A matrix Tensor. Must be one of the following types: float32,
 * float16, int32, int8, int4, bf16. 3D. Has format ND.
 * @li bias: An optional Tensor. Must be one of the following types:
 * float32, float16, int32, bf16. 1D. Has format ND.
 * @li offset_w: An optional Tensor. Must be one of the following types:
 * int8, int4. Has format ND. \n
 * @par Attributes:
 * Four attributes, including:
 * @li perm_x1: A list of ints. "x1" is permuted to shape [B, M, K] before multiplication.
 * @li perm_x2: A list of ints. "x2" is permuted to shape [B, K, N] before multiplication.
 * @li perm_y: A list of ints. "y" is permuted after multiplication.
 * @li offset_x: An optional integer for quantized TransposeBatchMatMul.
 * The negative offset added to the input "x1" for the int8 and int4 types. Ensure that
 * offset_x is within the effective range of the input data type. Defaults to "0". \n
 * @par Outputs:
 * y: The result matrix Tensor. 3D. Must be one of the following
 * types: float32, float16, int32, bf16. Has format ND. \n
 */
REG_OP(TransposeBatchMatMul)
    .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .ATTR(perm_x1, ListInt, {})
    .ATTR(perm_x2, ListInt, {})
    .ATTR(perm_y, ListInt, {})
    .ATTR(offset_x, Int, 0)
    .OP_END_FACTORY_REG(TransposeBatchMatMul)
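
// A compact host-side sketch (an illustration under assumed semantics, not the device kernel)
// of the documented behavior: x1 is permuted to [B, M, K], x2 to [B, K, N], the batched product
// is formed, and the result is permuted with perm_y. Tensor3D, Permute, and
// TransposeBatchMatMulSketch are hypothetical helpers; bias, offset_w, and offset_x are omitted.
#include <array>
#include <cstddef>
#include <vector>

struct Tensor3D {
  std::array<size_t, 3> dims;
  std::vector<float> data;  // row-major [d0, d1, d2]
  float &at(size_t i, size_t j, size_t k) { return data[(i * dims[1] + j) * dims[2] + k]; }
  float at(size_t i, size_t j, size_t k) const { return data[(i * dims[1] + j) * dims[2] + k]; }
};

// NumPy-style transpose: output dimension d takes its size and index from input dimension perm[d].
inline Tensor3D Permute(const Tensor3D &t, const std::array<int, 3> &perm) {
  Tensor3D out{{t.dims[perm[0]], t.dims[perm[1]], t.dims[perm[2]]}, std::vector<float>(t.data.size())};
  for (size_t i = 0; i < t.dims[0]; ++i)
    for (size_t j = 0; j < t.dims[1]; ++j)
      for (size_t k = 0; k < t.dims[2]; ++k) {
        std::array<size_t, 3> idx = {i, j, k};
        out.at(idx[perm[0]], idx[perm[1]], idx[perm[2]]) = t.at(i, j, k);
      }
  return out;
}

inline Tensor3D TransposeBatchMatMulSketch(const Tensor3D &x1, const Tensor3D &x2,
                                           const std::array<int, 3> &perm_x1,
                                           const std::array<int, 3> &perm_x2,
                                           const std::array<int, 3> &perm_y) {
  Tensor3D a = Permute(x1, perm_x1);  // [B, M, K]
  Tensor3D b = Permute(x2, perm_x2);  // [B, K, N]
  size_t B = a.dims[0], M = a.dims[1], K = a.dims[2], N = b.dims[2];
  Tensor3D y{{B, M, N}, std::vector<float>(B * M * N, 0.0f)};
  for (size_t bi = 0; bi < B; ++bi)
    for (size_t mi = 0; mi < M; ++mi)
      for (size_t ki = 0; ki < K; ++ki)
        for (size_t ni = 0; ni < N; ++ni)
          y.at(bi, mi, ni) += a.at(bi, mi, ki) * b.at(bi, ki, ni);
  return Permute(y, perm_y);  // final layout chosen by perm_y
}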

/**
 * @brief Performs non-maximum suppression (NMS) on the rotated boxes according
 * to their intersection-over-union (IoU). Rotated NMS iteratively removes lower
 * scoring rotated boxes which have an IoU greater than iou_threshold with
 * another (higher scoring) rotated box.
 * @par Inputs:
 * Three inputs, including:
 * @li boxes: A 2D Tensor of float16 or float32 with shape (N, 5). Rotated boxes to
 * perform NMS on. They are expected to be in (x1, y1, x2, y2, angle_degrees) format.
 * @li scores: A 1D Tensor of float16 or float32 with shape (N). Scores for each of
 * the rotated boxes.
 * @li labels: A 1D Tensor of int32 or int64 with shape (N). Labels for each of
 * the rotated boxes.
 * @par Attributes:
 * iou_threshold: A required float attribute. Discards all overlapping rotated
 * boxes whose IoU with a higher-scoring box is greater than iou_threshold.
 * @par Outputs:
 * Two outputs, including:
 * @li selected_detections: A 2D Tensor of float16 or float32 with shape (N, 5).
 * The selected boxes kept by rotated NMS, sorted in decreasing order of scores.
 * @li keep_indices: A 1D Tensor of int32 or int64 with shape (N). The indices of
 * selected_detections.
 * @attention Constraints:
 * Currently, the tensor type of the inputs (boxes, scores) only supports float.
 * The tensor type of keep_indices only supports int32.
 */
REG_OP(RotatedNMS)
    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(labels, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(selected_detections, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(keep_indices, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(iou_threshold, Float)
    .OP_END_FACTORY_REG(RotatedNMS)
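
// A sketch of the greedy selection loop described above: visit boxes in decreasing score
// order and suppress anything whose IoU with an already-kept box exceeds iou_threshold.
// The true rotated IoU requires oriented-rectangle intersection; the AxisAlignedIoU
// placeholder below deliberately ignores the angle, so this illustrates the control flow
// only, not the real kernel.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

struct RotatedBox { float x1, y1, x2, y2, angle_degrees; };

// Placeholder overlap measure: IoU of the axis-aligned parts, angle ignored.
inline float AxisAlignedIoU(const RotatedBox &a, const RotatedBox &b) {
  float iw = std::max(0.0f, std::min(a.x2, b.x2) - std::max(a.x1, b.x1));
  float ih = std::max(0.0f, std::min(a.y2, b.y2) - std::max(a.y1, b.y1));
  float inter = iw * ih;
  float area_a = (a.x2 - a.x1) * (a.y2 - a.y1);
  float area_b = (b.x2 - b.x1) * (b.y2 - b.y1);
  return inter / (area_a + area_b - inter);
}

inline std::vector<int32_t> RotatedNmsSketch(const std::vector<RotatedBox> &boxes,
                                             const std::vector<float> &scores, float iou_threshold) {
  std::vector<int32_t> order(boxes.size());
  std::iota(order.begin(), order.end(), 0);
  std::sort(order.begin(), order.end(), [&](int32_t a, int32_t b) { return scores[a] > scores[b]; });
  std::vector<bool> suppressed(boxes.size(), false);
  std::vector<int32_t> keep;  // indices into the original box list, highest score first
  for (int32_t i : order) {
    if (suppressed[i]) continue;
    keep.push_back(i);
    for (int32_t j : order) {
      if (j != i && !suppressed[j] && AxisAlignedIoU(boxes[i], boxes[j]) > iou_threshold) {
        suppressed[j] = true;  // overlaps an already-kept, higher-scoring box
      }
    }
  }
  return keep;
}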

/**
 * @brief Performs average pooling on the input. Used in the Conv2D + AvgPoolUpdate
 * combination to replace AvgPool.
 * @par Inputs:
 * @li x1: Output of the upstream Conv2D. A tensor of type float16, float32.
 * @li x2: Input feature map of the upstream Conv2D. A tensor of type int4, int8, float16, float32.
 * @par Attributes:
 * @li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window,
 * where N = C = 1, and H and W are positive integers within the range [1, 255].
 * @li strides: A required list of 4 ints, specifying the stride of the sliding window.
 * The strides of the N and C dimensions are 1.
 * The strides of the H and W dimensions are positive integers within the range [1, 63].
 * @li padding_mode: A required string, specifying the padding algorithm,
 * either "VALID", "SAME", or "CALCULATED".
 * "SAME" means that the output has the same spatial dimensions as the input.
 * "VALID" means no padding.
 * @li pads: Pad values used when padding_mode is "CALCULATED".
 * @li data_format: An optional string, specifying the data format of "ksize" and "strides",
 * either "NCHW" or "NHWC" (default).
 * @li ceil_mode: Whether to use ceil or floor to calculate the output size when padding_mode is "CALCULATED".
 * @li exclusive: Whether to ignore the padding area when calculating the average.
 * @par Outputs:
 * y: The average pooled output tensor. Has the same type and format as input "x1".
 * @attention Constraints:
 * @li Only single input and single output are supported.
 * @li "ksize_H" and "ksize_W" are positive integers within the range [1, 255], and ksize_H * ksize_W < 256.
 * @li Due to instruction restrictions,
 * the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
 * @par Third-party framework compatibility
 * Compatible with the TensorFlow/PyTorch/ONNX operator AvgPoolV2.
 */
REG_OP(AvgPoolUpdate)
    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x2, TensorType({DT_INT4, DT_INT8, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NHWC")
    .ATTR(ceil_mode, Bool, false)
    .ATTR(exclusive, Bool, true)
    .OP_END_FACTORY_REG(AvgPoolUpdate)
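
// A hedged sketch of the presumed role of this op in the Conv2D + AvgPoolUpdate fusion:
// "x1" is assumed to already hold per-window sums (e.g. from a convolution with an all-ones
// kernel), and the update step divides each sum by the number of contributing elements.
// The divisor rule below, including the "exclusive" handling, is an assumption for illustration.
#include <algorithm>

inline float AvgPoolDivisorSketch(int h_start, int w_start, int ksize_h, int ksize_w,
                                  int in_h, int in_w, bool exclusive) {
  if (!exclusive) {
    return static_cast<float>(ksize_h * ksize_w);  // padded positions counted as well
  }
  // Count only window positions that fall inside the real feature map (padding excluded).
  int h0 = std::max(h_start, 0), h1 = std::min(h_start + ksize_h, in_h);
  int w0 = std::max(w_start, 0), w1 = std::min(w_start + ksize_w, in_w);
  return static_cast<float>(std::max(h1 - h0, 0) * std::max(w1 - w0, 0));
}
// The pooled value for a window would then be, e.g., y = window_sum / AvgPoolDivisorSketch(...).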

/**
 * @brief Batches input by time.
 * @par Inputs:
 * x: A list of input tensors. It's a dynamic input.
 * @par Attributes:
 * @li window: The time window, within [-1, int64_max]. If -1, batches by the input data flag;
 * otherwise, batches by the input timestamp and the data flag.
 * @li batch_dim: Within [-1, input_shape_range). If -1, an input of shape [x, ..., x] produces
 * an output of shape [-1, x, ..., x]; otherwise, the output shape is [x, ..., -1(batch_dim), ..., x].
 * @li drop_remainder: A bool flag that takes effect when window > -1.
 * If true, data whose batch window is smaller than "window" is dropped.
 * @par Outputs:
 * y: A list of output tensors. It's a dynamic output, the same size as "x".
 * @attention Constraints:
 * @li Only supported in helper UDF.
 */
REG_OP(TimeBatch)
    .DYNAMIC_INPUT(x, TensorType::RealNumberType())
    .DYNAMIC_OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(window, Int)
    .ATTR(batch_dim, Int, -1)
    .ATTR(drop_remainder, Bool, false)
    .OP_END_FACTORY_REG(TimeBatch)

/**
 * @brief Auto batch process. \n
 * @par Inputs:
 * @li x: A list of input tensor objects. It's a dynamic input. \n
 * @par Outputs:
 * @li y: A list of output tensor objects. It's a dynamic output. \n
 * @par Attributes:
 * @li batch_size: The auto batch size.
 * @li timeout: The auto batch wait timeout (unit: ms).
 * @li padding: Whether to pad when the batch is insufficient.
 * @li slide_stride: The sliding window step.
 */
REG_OP(AutoBatch)
    .DYNAMIC_INPUT(x, TensorType::RealNumberType())
    .DYNAMIC_OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(batch_size, Int)
    .ATTR(timeout, Int, 0)
    .ATTR(padding, Bool, false)
    .ATTR(slide_stride, Int, 0)
    .OP_END_FACTORY_REG(AutoBatch)
}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.