
functional_ops.h

/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file functional_ops.h
 * \brief Definitions of the functional (control-flow) operators: If, Case, While, For and PartitionedCall.
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_

#include "graph/operator_reg.h"
#include "graph/operator.h"

namespace ge {

/**
 *@brief Select one of the subgraphs, pass the input tensors to it, and return its output tensors.
 * If "cond" means True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _If.
 */
REG_OP(_If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(_If)

/**
 *@brief Select one of the subgraphs, pass the input tensors to it, and return its output tensors.
 * If "cond" means True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessIf.
 */
REG_OP(StatelessIf)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(StatelessIf)

/**
 *@brief Select one of the subgraphs, pass the input tensors to it, and return its output tensors.
 * If "cond" means True, the selected subgraph is "then_branch".
 * Otherwise, the selected subgraph is "else_branch" . \n
 *@par Inputs:
 *@li cond: A Tensor. If "cond" is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if "cond" is a numerical scalar, non-zero means True and zero means False;
 * if "cond" is a string scalar, non-empty means True and empty means False;
 * if "cond" is not a scalar, non-empty means True and empty means False.
 * A plain-C++ sketch of this conversion rule follows the registration below.
 *@li input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li then_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what else_branch returns.
 *@li else_branch: A subgraph that takes 'input' and returns a list of tensors
 * whose types are the same as what then_branch returns . \n
 *@par Outputs:
 *output: The output tensors returned by either then_branch(input) or else_branch(input) . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator If.
 */
REG_OP(If)
    .INPUT(cond, TensorType::ALL())
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(then_branch)
    .GRAPH(else_branch)
    .OP_END_FACTORY_REG(If)

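// Illustrative sketch (not part of the operator definitions): the boolean
// conversion rule for "cond" described above, expressed as plain C++. The
// helper names below are hypothetical; the real conversion operates on
// tensor values at runtime.
static inline bool CondNumericMeansTrue(double scalar) {
  return scalar != 0.0;  // numerical scalar: non-zero means True, zero means False
}
static inline bool CondStringMeansTrue(const char *scalar) {
  // string scalar: non-empty means True, empty means False
  return scalar != nullptr && scalar[0] != '\0';
}
static inline bool CondTensorMeansTrue(long element_count) {
  return element_count > 0;  // non-scalar tensor: non-empty means True
}
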
/**
 *@brief Select one of the subgraphs, pass the input tensors to it, and return its output tensors . \n
 *@par Inputs:
 *@li branch_index: An int32 scalar which determines the selected subgraph.
 *@li input: The input tensors, which will be passed to the subgraph . It's a dynamic input. \n
 *@par Graphs:
 *branches: A list of subgraphs, each of which takes 'input' and returns a list of tensors
 * whose types are the same as what every other subgraph returns . \n
 *@par Outputs:
 *output: The output tensors returned by one of the branches . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator Case.
 * A plain-C++ selection sketch follows the registration below.
 */
REG_OP(Case)
    .INPUT(branch_index, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .DYNAMIC_GRAPH(branches)
    .OP_END_FACTORY_REG(Case)

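// Illustrative sketch (not part of the operator definitions): Case executes
// exactly one subgraph, chosen by "branch_index". "CaseBranchSketch" is a
// hypothetical stand-in for a subgraph taking and returning tensor lists;
// handling of an out-of-range index is left to the runtime.
typedef void (*CaseBranchSketch)();
static inline void RunCaseSketch(CaseBranchSketch branches[], int num_branches,
                                 int branch_index) {
  if (branch_index >= 0 && branch_index < num_branches) {
    branches[branch_index]();  // only the selected subgraph runs
  }
}
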
/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Attributes:
 *parallel_iterations: An optional int, defaults to 10 . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator _While.
 */
REG_OP(_While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .OP_END_FACTORY_REG(_While)

/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Attributes:
 *parallel_iterations: An optional int, defaults to 10 . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator While.
 */
REG_OP(While)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(While)

/**
 *@brief Cyclically execute the "body" subgraph until the tensor returned by the "cond" subgraph means False . \n
 *@par Inputs:
 *input: The input tensors . It's a dynamic input. \n
 *@par Graphs:
 *@li cond: A subgraph that takes 'input' and returns a tensor.
 * If the tensor is not a scalar of boolean type,
 * it will be converted to a boolean according to the following rule:
 * if it is a numerical scalar, non-zero means True and zero means False;
 * if it is a string scalar, non-empty means True and empty means False;
 * if it is not a scalar, non-empty means True and empty means False.
 *@li body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Attributes:
 *parallel_iterations: An optional int, defaults to 10 . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatelessWhile.
 * A plain-C++ sketch of the loop semantics follows the registration below.
 */
REG_OP(StatelessWhile)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(cond)
    .GRAPH(body)
    .ATTR(parallel_iterations, Int, 10)
    .OP_END_FACTORY_REG(StatelessWhile)

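// Illustrative sketch (not part of the operator definitions): the loop
// semantics shared by _While, While and StatelessWhile. "LoopStateSketch"
// is a hypothetical stand-in for the dynamic list of loop-carried tensors.
struct LoopStateSketch {};
static inline LoopStateSketch RunWhileSketch(
    bool (*cond)(const LoopStateSketch &),
    LoopStateSketch (*body)(const LoopStateSketch &),
    LoopStateSketch state) {
  while (cond(state)) {   // re-evaluate "cond" on the latest state
    state = body(state);  // "body" returns tensors of the same types as "input"
  }
  return state;           // the final state becomes "output"
}
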
/**
 *@brief Cyclically execute the "body" subgraph until the first input of the For op exceeds the upper bound . \n
 *@par Inputs:
 *@li start: An int32 scalar. The lower bound.
 *@li limit: An int32 scalar. The upper bound.
 *@li delta: An int32 scalar. The step size.
 *@li input: The input tensors, which will be passed to "body" . It's a dynamic input. \n
 *@par Graphs:
 *body: A subgraph that takes 'input' and returns another list of tensors . \n
 *@par Outputs:
 *output: The output tensors returned by "body". Has the same type as "input" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator For.
 * A plain-C++ sketch of the counter semantics follows the registration below.
 */
REG_OP(For)
    .INPUT(start, DT_INT32)
    .INPUT(limit, DT_INT32)
    .INPUT(delta, DT_INT32)
    .DYNAMIC_INPUT(input, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(body)
    .OP_END_FACTORY_REG(For)

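// Illustrative sketch (not part of the operator definitions): For threads the
// tensor state through "body" once per counter value, reusing the hypothetical
// LoopStateSketch from the While sketch above.
static inline LoopStateSketch RunForSketch(
    int start, int limit, int delta,
    LoopStateSketch (*body)(const LoopStateSketch &),
    LoopStateSketch state) {
  // Counter semantics follow range(start, limit, delta); delta may be negative.
  for (int i = start; (delta > 0) ? (i < limit) : (i > limit); i += delta) {
    state = body(state);  // each iteration consumes and produces the state
  }
  return state;
}
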
/**
 *@brief Pass the input tensors to the subgraph "f" and return its output tensors . \n
 *@par Inputs:
 *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors . \n
 *@par Attributes:
 *@li config: An optional string, defaults to "".
 *@li config_proto: An optional string, defaults to "".
 *@li executor_type: An optional string, defaults to "" . \n
 *@par Outputs:
 *output: The output tensors returned by "f" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator PartitionedCall.
 */
REG_OP(PartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(PartitionedCall)

/**
 *@brief Pass the input tensors to the subgraph "f" and return its output tensors . \n
 *@par Inputs:
 *args: The input tensors, which will be passed to "f" . It's a dynamic input. \n
 *@par Graphs:
 *f: A subgraph that takes 'args' and returns another list of tensors . \n
 *@par Attributes:
 *@li config: An optional string, defaults to "".
 *@li config_proto: An optional string, defaults to "".
 *@li executor_type: An optional string, defaults to "" . \n
 *@par Outputs:
 *output: The output tensors returned by "f" . It's a dynamic output. \n
 *@par Third-party framework compatibility
 *@Compatible with the TensorFlow operator StatefulPartitionedCall.
 * A plain-C++ call sketch follows the registration below.
 */
REG_OP(StatefulPartitionedCall)
    .DYNAMIC_INPUT(args, TensorType::ALL())
    .DYNAMIC_OUTPUT(output, TensorType::ALL())
    .GRAPH(f)
    .ATTR(config, String, "")
    .ATTR(config_proto, String, "")
    .ATTR(executor_type, String, "")
    .OP_END_FACTORY_REG(StatefulPartitionedCall)

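// Illustrative sketch (not part of the operator definitions): both
// PartitionedCall variants forward "args" to the subgraph "f" and return
// whatever "f" produces; the stateful variant differs only in that "f" may
// contain stateful (side-effecting) ops. LoopStateSketch again stands in
// for the dynamic tensor lists.
static inline LoopStateSketch RunPartitionedCallSketch(
    LoopStateSketch (*f)(const LoopStateSketch &),
    const LoopStateSketch &args) {
  return f(args);  // "output" is exactly what "f" returns
}
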
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_FUNCTIONAL_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module ME and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes on it, and finally outputs a graph that runs efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit the processor's compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.