
control_flow_ops.h
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_CONTROL_FLOW_OPS_H_
#define GE_CONTROL_FLOW_OPS_H_

#include "graph/operator_reg.h"
#include "graph/operator.h"

namespace ge {

// Forwards the value of an available input tensor "x" to output "y";
// "value_index" reports the index of the input that was forwarded.
REG_OP(Merge)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(value_index, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(Merge)

// Reference-type variant of Merge.
REG_OP(RefMerge)
    .DYNAMIC_INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(value_index, TensorType({DT_INT32}))
    .OP_END_FACTORY_REG(RefMerge)

// Forwards "data" to "output_true" when "pred" is true, otherwise to "output_false".
REG_OP(Switch)
    .INPUT(data, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .INPUT(pred, TensorType({DT_BOOL}))
    .OUTPUT(output_false, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(output_true, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(Switch)

// Reference-type variant of Switch.
REG_OP(RefSwitch)
    .INPUT(data, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .INPUT(pred, TensorType({DT_BOOL}))
    .OUTPUT(output_false, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(output_true, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(RefSwitch)

// Forwards "data" to the dynamic output selected by "pred_value".
REG_OP(SwitchN)
    .INPUT(data, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .INPUT(pred_value, TensorType({DT_INT64}))
    .DYNAMIC_OUTPUT(output, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(SwitchN)

// Forwards "x" into the child frame identified by "frame_name"; "is_constant"
// marks the tensor as loop-invariant within that frame.
REG_OP(Enter)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .ATTR(frame_name, String, "")
    .ATTR(is_constant, Bool, false)
    .OP_END_FACTORY_REG(Enter)

// Reference-type variant of Enter.
REG_OP(RefEnter)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .ATTR(frame_name, String, "")
    .ATTR(is_constant, Bool, false)
    .OP_END_FACTORY_REG(RefEnter)

// Forwards the boolean loop-continuation predicate.
REG_OP(LoopCond)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .OP_END_FACTORY_REG(LoopCond)

// Makes its input available to the next loop iteration.
REG_OP(NextIteration)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(NextIteration)

// Reference-type variant of NextIteration.
REG_OP(RefNextIteration)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(RefNextIteration)

// Forwards "x" from a child frame back to its parent frame.
REG_OP(Exit)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(Exit)

// Reference-type variant of Exit.
REG_OP(RefExit)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
        DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_UINT32,
        DT_UINT64, DT_BOOL}))
    .OP_END_FACTORY_REG(RefExit)

// Has no data inputs or outputs; serves only as an anchor for control edges.
REG_OP(ControlTrigger)
    .OP_END_FACTORY_REG(ControlTrigger)

}  // namespace ge

#endif  // GE_CONTROL_FLOW_OPS_H_
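The operators registered above are consumed through the GE IR graph-construction API: each REG_OP entry expands to a class under ge::op whose inputs and attributes are set while assembling a ge::Graph. Below is a minimal sketch of wiring a Switch node from two Data placeholders. It assumes the generated set_input_* and set_attr_* setters, ge::op::Data from array_ops.h, and ge::Graph::SetInputs/SetOutputs as used in the Ascend graph-construction samples; the graph and node names are purely illustrative.

#include "graph/graph.h"
#include "control_flow_ops.h"
#include "array_ops.h"  // assumed to provide ge::op::Data

// Build a tiny graph in which "pred" decides whether "data" flows to
// Switch's output_true or output_false branch.
ge::Graph BuildSwitchGraph() {
  auto data = ge::op::Data("data").set_attr_index(0);  // tensor input
  auto pred = ge::op::Data("pred").set_attr_index(1);  // DT_BOOL predicate

  auto sw = ge::op::Switch("switch")
                .set_input_data(data)
                .set_input_pred(pred);

  ge::Graph graph("switch_demo");
  graph.SetInputs({data, pred}).SetOutputs({sw});
  return graph;
}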

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and outputs a graph that runs efficiently on the target hardware. GE is optimized specifically for the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.