
functional_ops.h

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file functional_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_

#include "graph/operator_reg.h"
#include "graph/operator.h"

namespace ge {
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors.
 * If "cond" evaluates to True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _If.
 */
REG_OP(_If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(_If)
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors.
 * If "cond" evaluates to True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessIf.
 */
REG_OP(StatelessIf)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(StatelessIf)
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors.
 * If "cond" evaluates to True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator If.
 */
REG_OP(If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(If)
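The three registrations above (_If, StatelessIf, If) share the same selection semantics and differ only in the statefulness guarantees the framework tracks for them. The following is a minimal sketch in plain C++ of the documented cond-coercion and branch-selection rules; it is not the GE graph API, and Tensor, Branch, CondToBool, and IfSketch are illustrative stand-ins:

#include <functional>
#include <string>
#include <vector>

using Tensor = std::vector<float>;  // illustrative stand-in, not ge::Tensor
using Branch = std::function<std::vector<Tensor>(const std::vector<Tensor>&)>;

// Coercion rule for a non-boolean "cond", as documented above:
// numerical scalar -> non-zero is True; string scalar -> non-empty is True;
// non-scalar -> non-empty (at least one element) is True.
bool CondToBool(double numerical_scalar) { return numerical_scalar != 0.0; }
bool CondToBool(const std::string &string_scalar) { return !string_scalar.empty(); }
bool CondToBool(const Tensor &non_scalar) { return !non_scalar.empty(); }

// If semantics: exactly one of the two subgraphs runs on "input", and both
// branches must return lists with matching tensor types.
std::vector<Tensor> IfSketch(bool cond, const std::vector<Tensor> &input,
                             const Branch &then_branch, const Branch &else_branch) {
  return cond ? then_branch(input) : else_branch(input);
}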
/**
 *@brief Select one of the subgraphs to pass the input tensors and return the output tensors . \n
 *@par Inputs:
 *@li branch_index: An int32 scalar which determines the selected subgraph.
 *@li input: The input tensors, which will be passed to the subgraph . It's a dynamic input. \n
 *@par Graphs:
 *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors
 * whose types are the same as what every other subgraph returns . \n
 *@par Outputs:
 *output: The output tensors returned by one of the branches . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator Case.
 */
REG_OP(Case)
    .INPUT(branch_index, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .DYNAMIC_GRAPH(branches)
    .OP_END_FACTORY_REG(Case)
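Case generalizes If from two branches to n: branch_index picks the subgraph, and every branch must return the same tensor types. A minimal plain-C++ sketch follows; the types, names, and the out-of-range handling are illustrative choices, not the GE API (this header does not specify what the real operator does for an out-of-range index):

#include <cstdint>
#include <functional>
#include <stdexcept>
#include <vector>

using Tensor = std::vector<float>;
using Branch = std::function<std::vector<Tensor>(const std::vector<Tensor>&)>;

std::vector<Tensor> CaseSketch(int32_t branch_index, const std::vector<Tensor> &input,
                               const std::vector<Branch> &branches) {
  // Illustrative handling only: reject an out-of-range selector.
  if (branch_index < 0 || branch_index >= static_cast<int32_t>(branches.size()))
    throw std::out_of_range("branch_index");
  return branches[branch_index](input);  // run exactly one branch
}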
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _While.
 */
REG_OP(_While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .OP_END_FACTORY_REG(_While)
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Attributes:
 *parallel_iterations: An optional int. Defaults to 10 . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator While.
 */
REG_OP(While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(While)
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Attributes:
 *parallel_iterations: An optional int. Defaults to 10 . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessWhile.
 */
REG_OP(StatelessWhile)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(StatelessWhile)
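_While, While, and StatelessWhile all implement the same loop: evaluate "cond" on the current loop-carried tensors and, while it coerces to True, replace them with body's outputs. Because those outputs feed the next iteration, "body" must preserve the input types. A plain-C++ sketch of this serial contract (not the GE API; parallel_iterations is a scheduling hint and does not appear in the serial semantics):

#include <functional>
#include <vector>

using Tensor = std::vector<float>;
using Branch = std::function<std::vector<Tensor>(const std::vector<Tensor>&)>;

std::vector<Tensor> WhileSketch(std::vector<Tensor> state,
                                const std::function<bool(const std::vector<Tensor>&)> &cond,
                                const Branch &body) {
  // "state" is the loop-carried value list; body's outputs must have the
  // same types as its inputs so they can feed both "cond" and the next step.
  while (cond(state))
    state = body(state);
  return state;  // the op's "output": the final loop-carried tensors
}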
/**
 *@brief Cyclically execute the "body" subgraph until the first input of the For op exceeds the upper bound . \n
 *@par Inputs:
 *@li start: An int32 scalar. The lower bound.
 *@li limit: An int32 scalar. The upper bound.
 *@li delta: An int32 scalar. The step size.
 *@li input: The input tensors, which will be passed to "body" . It's a dynamic input. \n
 *@par Graphs:
 *body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator For.
 */
REG_OP(For)
    .INPUT(start, DT_INT32)
    .INPUT(limit, DT_INT32)
    .INPUT(delta, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(body)
    .OP_END_FACTORY_REG(For)
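For is While with an explicit counter: the loop runs for index values start, start + delta, ... while the index stays below limit (above it for a negative delta, by analogy with a C for loop). A plain-C++ sketch under that reading; the names are illustrative, and the counter-handling details beyond this header's one-line description are assumptions:

#include <cstdint>
#include <functional>
#include <vector>

using Tensor = std::vector<float>;
using Branch = std::function<std::vector<Tensor>(const std::vector<Tensor>&)>;

std::vector<Tensor> ForSketch(int32_t start, int32_t limit, int32_t delta,
                              std::vector<Tensor> input, const Branch &body) {
  // Assumed iteration rule: count from start toward limit in steps of delta;
  // a negative delta counts downward. Each pass feeds body's outputs back in.
  for (int32_t i = start; (delta > 0) ? (i < limit) : (i > limit); i += delta)
    input = body(input);
  return input;
}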
/**
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n
 *@par Inputs:
 *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors . \n
 *@par Attributes:
 *@li config: An optional string. Defaults to "".
 *@li config_proto: An optional string. Defaults to "".
 *@li executor_type: An optional string. Defaults to "" . \n
 *@par Outputs:
 *output: The output tensors returned by "f" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator PartitionedCall.
 */
REG_OP(PartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(PartitionedCall)
/**
 *@brief Pass the input tensors to the subgraph "f" and return the output tensors . \n
 *@par Inputs:
 *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors . \n
 *@par Attributes:
 *@li config: An optional string. Defaults to "".
 *@li config_proto: An optional string. Defaults to "".
 *@li executor_type: An optional string. Defaults to "" . \n
 *@par Outputs:
 *output: The output tensors returned by "f" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatefulPartitionedCall.
 */
REG_OP(StatefulPartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(StatefulPartitionedCall)
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_
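Functionally, PartitionedCall and StatefulPartitionedCall are plain calls: the output is f(args). The config, config_proto, and executor_type strings steer how the runtime partitions and schedules "f" rather than what it computes, and the stateful variant additionally tells the framework that "f" may have side effects, so the call must not be pruned or deduplicated. A plain-C++ sketch of the call semantics alone (not the GE API; names are illustrative):

#include <functional>
#include <vector>

using Tensor = std::vector<float>;
using Branch = std::function<std::vector<Tensor>(const std::vector<Tensor>&)>;

std::vector<Tensor> PartitionedCallSketch(const std::vector<Tensor> &args, const Branch &f) {
  // The string attributes affect placement/scheduling, not the result.
  return f(args);
}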

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.