
experiment_ops.h 6.6 kB

/**
* Copyright (c) Huawei Technologies Co., Ltd. 2020-2021. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*!
* \file experiment_ops.h
* \brief
*/
#ifndef OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

#include "graph/operator_reg.h"

namespace ge {
/**
* @brief Updates "var" according to the AdamW algorithm.
*
* @attention Constraints:
* The input tensors must have the same shape.
*
* @par Inputs:
* @li var: A mutable Tensor of the type TensorType::NumberType().
* Should be from a Variable().
* @li m: A mutable Tensor of the same type as "var".
* Should be from a Variable().
* @li v: A mutable Tensor of the same type as "var".
* Should be from a Variable().
* @li beta1_power: A scalar of the same type as "var".
* @li beta2_power: A scalar of the same type as "var".
* @li lr: The learning rate. A scalar of the same type as "var".
* @li weight_decay: The weight decay. A scalar of the same type as "var".
* @li beta1: A scalar of the same type as "var".
* @li beta2: A scalar of the same type as "var".
* @li epsilon: A scalar of the same type as "var".
* @li grad: A Tensor of the same type as "var", for the gradient.
* @li max_grad_norm: An optional mutable Tensor of the same type as "var".
* Should be from a Variable().
*
* @par Attributes:
* @li amsgrad: An optional bool. Defaults to "False".
* If "True", the optional input "max_grad_norm" must be provided.
* @li maximize: An optional bool. Defaults to "False".
*
* @par Outputs:
* @li var: A mutable tensor. Has the same type as input "var".
* @li m: A mutable tensor. Has the same type as input "m".
* @li v: A mutable tensor. Has the same type as input "v". \n
*/
REG_OP(ApplyAdamW)
    .INPUT(var, TensorType::NumberType())
    .INPUT(m, TensorType::NumberType())
    .INPUT(v, TensorType::NumberType())
    .INPUT(beta1_power, TensorType::NumberType())
    .INPUT(beta2_power, TensorType::NumberType())
    .INPUT(lr, TensorType::NumberType())
    .INPUT(weight_decay, TensorType::NumberType())
    .INPUT(beta1, TensorType::NumberType())
    .INPUT(beta2, TensorType::NumberType())
    .INPUT(epsilon, TensorType::NumberType())
    .INPUT(grad, TensorType::NumberType())
    .OPTIONAL_INPUT(max_grad_norm, TensorType::NumberType())
    .OUTPUT(var, TensorType::NumberType())
    .OUTPUT(m, TensorType::NumberType())
    .OUTPUT(v, TensorType::NumberType())
    .ATTR(amsgrad, Bool, false)
    .ATTR(maximize, Bool, false)
    .OP_END_FACTORY_REG(ApplyAdamW)
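
// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the operator registration: the
// standard decoupled AdamW update per element, written out to document the
// semantics of the inputs above. The AMSGrad branch assumes a PyTorch-style
// use of "max_grad_norm" (a running maximum of the second moment); the actual
// device kernel may differ. Assumes <cmath> is included at the top of file.
inline void AdamWStepSketch(float &var, float &m, float &v, float &max_v,
                            float grad, float lr, float weight_decay,
                            float beta1, float beta2, float beta1_power,
                            float beta2_power, float epsilon, bool amsgrad,
                            bool maximize) {
  if (maximize) {
    grad = -grad;                                 // ascend instead of descend
  }
  var -= lr * weight_decay * var;                 // decoupled weight decay
  m = beta1 * m + (1.0f - beta1) * grad;          // first-moment estimate
  v = beta2 * v + (1.0f - beta2) * grad * grad;   // second-moment estimate
  const float m_hat = m / (1.0f - beta1_power);   // bias correction, where
  float v_hat = v / (1.0f - beta2_power);         // betaN_power = betaN^step
  if (amsgrad) {                                  // assumed AMSGrad variant
    if (v_hat > max_v) {
      max_v = v_hat;
    }
    v_hat = max_v;
  }
  var -= lr * m_hat / (std::sqrt(v_hat) + epsilon);
}
// ---------------------------------------------------------------------------
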
/**
* @brief Multiplies matrix "a" by matrix "b", producing "a * b". \n
* @par Inputs:
* Four inputs, including:
* @li x1: A matrix Tensor. Must be one of the following types: float32,
* float16, int32, int8, int4, bf16. 3D. Has format ND.
* @li x2: A matrix Tensor. Must be one of the following types: float32,
* float16, int32, int8, int4, bf16. 3D. Has format ND.
* @li bias: An optional Tensor. Must be one of the following types:
* float32, float16, int32, bf16. 1D. Has format ND.
* @li offset_w: An optional Tensor. Must be one of the following types:
* int8, int4. Has format ND. \n
* @par Attributes:
* Four attributes, including:
* @li perm_x1: A list of ints. "x1" is permuted to shape [B, M, K] before multiplication.
* @li perm_x2: A list of ints. "x2" is permuted to shape [B, K, N] before multiplication.
* @li perm_y: A list of ints. "y" is permuted after multiplication.
* @li offset_x: An optional integer for quantized TransposeBatchMatMul.
* The negative offset added to the input "x1" for the int8 and int4 types. Ensure that
* "offset_x" is within the effective range of the input data type. Defaults to "0". \n
* @par Outputs:
* y: The result matrix Tensor. Must be one of the following
* types: float32, float16, int32, bf16. 3D. Has format ND. \n
*/
REG_OP(TransposeBatchMatMul)
    .INPUT(x1, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .INPUT(x2, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_INT8, DT_INT4, DT_BF16}))
    .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .OPTIONAL_INPUT(offset_w, TensorType({DT_INT8, DT_INT4}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32, DT_BF16}))
    .ATTR(perm_x1, ListInt, {})
    .ATTR(perm_x2, ListInt, {})
    .ATTR(perm_y, ListInt, {})
    .ATTR(offset_x, Int, 0)
    .OP_END_FACTORY_REG(TransposeBatchMatMul)
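
// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the operator registration: the shape
// arithmetic implied by the perm attributes above. "PermuteShape" is a
// hypothetical helper operating on shape vectors only; it assumes std::vector
// and int64_t are available (graph/operator_reg.h already uses them for
// ListInt attributes).
inline std::vector<int64_t> PermuteShape(const std::vector<int64_t> &shape,
                                         const std::vector<int64_t> &perm) {
  std::vector<int64_t> out(shape.size());
  for (size_t i = 0; i < perm.size(); ++i) {
    out[i] = shape[perm[i]];  // dim i of the view is dim perm[i] of the source
  }
  return out;
}
// Example: an "x1" of shape [M, B, K] = [32, 8, 64] with perm_x1 = {1, 0, 2}
// is viewed as [B, M, K] = [8, 32, 64] before the batched multiply; "perm_y"
// is applied the same way to the [B, M, N] product.
// ---------------------------------------------------------------------------
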
/**
* @brief Performs non-maximum suppression (NMS) on the rotated boxes according
* to their intersection-over-union (IoU). Rotated NMS iteratively removes
* lower-scoring rotated boxes which have an IoU greater than iou_threshold with
* another (higher-scoring) rotated box.
* @par Inputs:
* Three inputs, including:
* @li boxes: A 2D Tensor of float16 or float32 with shape (N, 5). Rotated boxes to
* perform NMS on. They are expected to be in (x1, y1, x2, y2, angle_degrees) format.
* @li scores: A 1D Tensor of float16 or float32 with shape (N). Scores for each one of
* the rotated boxes.
* @li labels: A 1D Tensor of int32 or int64 with shape (N). Labels for each one of
* the rotated boxes.
* @par Attributes:
* iou_threshold: A required float attribute. Discards all overlapping rotated
* boxes with IoU > iou_threshold.
* @par Outputs:
* Two outputs, including:
* @li selected_detections: A 2D Tensor of float16 or float32 with shape (N, 5).
* The selected boxes kept by Rotated NMS, sorted in decreasing order of scores.
* @li keep_indices: A 1D Tensor of int32 or int64 with shape (N). The indices of
* selected_detections.
* @attention Constraints:
* Currently, the tensor type of the inputs (boxes, scores) only supports float.
* The tensor type of keep_indices only supports int32.
*/
REG_OP(RotatedNMS)
    .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(labels, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(selected_detections, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(keep_indices, TensorType({DT_INT32, DT_INT64}))
    .REQUIRED_ATTR(iou_threshold, Float)
    .OP_END_FACTORY_REG(RotatedNMS)
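
// ---------------------------------------------------------------------------
// Illustrative sketch only, not part of the operator registration: the greedy
// selection loop the brief above describes, run over host-side indices. The
// rotated-box IoU itself is the kernel's job, so it is taken here as a
// hypothetical callback; per-label handling is omitted. Assumes <vector> and
// <functional> are included at the top of the file.
inline std::vector<size_t> RotatedNmsSketch(
    const std::vector<size_t> &order,  // box indices sorted by score, descending
    float iou_threshold,
    const std::function<float(size_t, size_t)> &rotated_iou) {
  std::vector<size_t> keep;
  std::vector<bool> suppressed(order.size(), false);
  for (size_t i = 0; i < order.size(); ++i) {
    if (suppressed[i]) {
      continue;
    }
    keep.push_back(order[i]);  // the highest-scoring remaining box survives
    for (size_t j = i + 1; j < order.size(); ++j) {
      // Discard lower-scoring boxes that overlap the kept box too strongly.
      if (!suppressed[j] && rotated_iou(order[i], order[j]) > iou_threshold) {
        suppressed[j] = true;
      }
    }
  }
  return keep;
}
// ---------------------------------------------------------------------------
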
}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_EXPERIMENT_OPS_H_

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.