You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

pad_ops.h 18 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513
  1. /**
  2. * Copyright 2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file pad_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
*@brief Creates a tensor filled with a scalar value.
* This operation creates a tensor of shape "dims" and fills it with "value".
*
*@par Inputs:
*@li dims: A 1D tensor of type int32 or int64. Represents the shape of the output tensor . \n
*@li value: A 0D scalar. Specifies the value to fill the returned tensor.
* Must be one of the following types:
* float16, float32, double, int32, uint8, int16, int8, complex64, int64, bool,
* qint8, quint8, qint32, qint16, quint16, uint16, complex128, uint32, uint64.
*
*@par Outputs:
* y: A tensor. Has the same type as "value".
*
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator Fill.
*@li Compatible with the Caffe operator Filler.
*
*/
REG_OP(Fill)
    .INPUT(dims, TensorType::IndexNumberType())
    .INPUT(value, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
                              DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
                              DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
                              DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_UINT8, DT_INT16,
                           DT_INT8, DT_COMPLEX64, DT_INT64, DT_BOOL, DT_QINT8,
                           DT_QUINT8, DT_QINT32, DT_QINT16, DT_QUINT16, DT_UINT16,
                           DT_COMPLEX128, DT_FLOAT16, DT_UINT32, DT_UINT64}))
    .OP_END_FACTORY_REG(Fill)
/**
*@brief Creates a tensor filled with a scalar value.
* This operation creates a tensor of shape "dims" and fills it with "value".
*
*@par Inputs:
* value: A 0D scalar for the value to fill the returned tensor. Must be one of
* the following types (matching the registered type list below):
* float16, float32, double, int8, uint8, int16, uint16, int32, uint32,
* int64, uint64, bool.
*
*@par Attributes:
* dims: A required list of int.
* 1-D. Represents the shape of the output tensor.
*
*@par Outputs:
* y: A tensor. Has the same type as "value".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Fill instead.
*/
REG_OP(FillD)
    .INPUT(value, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
                              DT_UINT16, DT_UINT8, DT_INT32, DT_INT64,
                              DT_UINT32, DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16, DT_UINT16,
                           DT_UINT8, DT_INT32, DT_INT64, DT_UINT32,
                           DT_UINT64, DT_BOOL, DT_DOUBLE}))
    .REQUIRED_ATTR(dims, ListInt)
    .OP_END_FACTORY_REG(FillD)
/**
*@brief Broadcasts an array for a compatible shape.
* Broadcasting is the process of making arrays to have compatible shapes
* for arithmetic operations. Two shapes are compatible if for each
* dimension pair they are either equal or one of them is one. When trying
* to broadcast a Tensor to a shape, it starts with the trailing dimensions,
* and works its way forward.
*
*@par Inputs:
*@li x: A tensor.
*@li shape: A 1D tensor of type int32 or int64, for the shape of the desired output.
*
*@par Outputs:
* y: A tensor. Has the same type as "x".
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastTo.
*
*/
REG_OP(BroadcastTo)
    .INPUT(x, TensorType::BasicType())
    .INPUT(shape, TensorType({DT_INT32,DT_INT64}))
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(BroadcastTo)
/**
*@brief Broadcasts an array for a compatible shape.
* Broadcasting is the process of making arrays to have compatible shapes
* for arithmetic operations. Two shapes are compatible if for each
* dimension pair they are either equal or one of them is one. When trying
* to broadcast a Tensor to a shape, it starts with the trailing dimensions,
* and works its way forward.
*
*@par Inputs:
* x: A tensor. A tensor to broadcast.
*
*@par Attributes:
* shape: A required list of int.
* Represents the shape of the desired output.
*
*@par Outputs:
* y: A tensor. Has the same type as "x".
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator BroadcastTo.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use BroadcastTo instead.
*/
REG_OP(BroadcastToD)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .REQUIRED_ATTR(shape, ListInt)
    .OP_END_FACTORY_REG(BroadcastToD)
/**
*@brief Pads a tensor . \n
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32,
* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64.
* @li paddings: A Tensor of type int32 or int64.
* For each dimension D of input, paddings[D, 0] indicates how many values to add
* before the contents of tensor in that dimension, and paddings[D, 1] indicates
* how many values to add after the contents of tensor in that dimension . \n
*@par Outputs:
*y: A Tensor of the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with TensorFlow operator Pad.
*/
REG_OP(Pad)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(Pad)
/**
*@brief Pads a tensor . \n
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n
*@par Attributes:
*paddings: A required "vector<vector<int>>".
* For each dimension D of input, paddings[D, 0] indicates how many
* values to add before the contents of tensor in that dimension,
* and paddings[D, 1] indicates how many values to add after the
* contents of tensor in that dimension . \n
*@par Outputs:
*y: A Tensor of the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with TensorFlow operator Pad.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Pad instead.
*/
REG_OP(PadD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .REQUIRED_ATTR(paddings, ListListInt)
    .OP_END_FACTORY_REG(PadD)
/**
*@brief Pads a tensor with a given constant value . \n
*@par Inputs:
*Three inputs, in the registered order:
* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32,
* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64.
* @li paddings: A Tensor of type int32 or int64.
* @li constant_values: A Tensor. Must have the same type as "x" . \n
*@par Outputs:
*y: A Tensor of the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with TensorFlow operator PadV2.
*/
REG_OP(PadV2)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .INPUT(constant_values, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .OP_END_FACTORY_REG(PadV2)
/**
*@brief Pads a tensor with a given constant value . \n
*@par Inputs:
*Two inputs, including:
*@li x: A Tensor. Must be one of the following types: float16, float32, int32 . \n
*@li constant_values: A Tensor. Must have the same type as "x".
*@par Attributes:
*paddings: A required attribute of type "vector<vector<int>>".
* For each dimension D of input, paddings[D, 0] indicates how many
* values to add before the contents of tensor in that dimension,
* and paddings[D, 1] indicates how many values to add after the
* contents of tensor in that dimension . \n
*@par Outputs:
*y: A Tensor of the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with TensorFlow operator PadV2.
*
* NOTE(review): other "*D" variants in this file carry a deprecation warning in
* favor of the non-D op; confirm whether PadV2D should document one as well.
*/
REG_OP(PadV2D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(constant_values, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .REQUIRED_ATTR(paddings, ListListInt)
    .OP_END_FACTORY_REG(PadV2D)
/**
*@brief Pads a tensor.
*@par Inputs:
*Three inputs (two required, one optional), including:
* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32,
* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64.
* @li paddings: A Tensor of type int32 or int64.
* @li constant_values: An optional Tensor holding the fill value for "constant" mode.
* Registered as BasicType; presumably the same type as "x" — confirm against kernel.
*@par Attributes:
* @li mode: An optional string, Defaults to "constant", indicates paddings mode,
* support "constant", "reflect", "edge"
* @li paddings_contiguous: An optional bool value, Defaults to true.
* If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...]
* If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...]
*@par Outputs:
*y: A Tensor of the same type as "x".
*@par Third-party framework compatibility:
* Compatible with ONNX operator Pad.
*/
REG_OP(PadV3)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OPTIONAL_INPUT(constant_values, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(mode, String, "constant")
    .ATTR(paddings_contiguous, Bool, true)
    .OP_END_FACTORY_REG(PadV3)
/**
*@brief Computes the gradient of PadV3.
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, double, int32,
* uint8, int16, int8, complex64, int64, qint8, quint8, qint32, qint16, quint16, uint16,
* complex128, uint32, uint64.
* @li paddings: A Tensor of type int32 or int64.
*@par Attributes:
* @li mode: An optional string, Defaults to "reflect", indicates paddings mode,
* support "reflect", "edge"
* @li paddings_contiguous: An optional bool value, Defaults to true.
* If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...]
* If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...]
*@par Outputs:
*y: A Tensor of the same type as "x".
*@par Third-party framework compatibility:
* Compatible with ONNX operator PadGrad.
*/
REG_OP(PadV3Grad)
    .INPUT(x, TensorType::BasicType())
    .INPUT(paddings, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(mode, String, "reflect")
    .ATTR(paddings_contiguous, Bool, true)
    .OP_END_FACTORY_REG(PadV3Grad)
/**
*@brief Pads a tensor.
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8.
* NOTE(review): an earlier doc also listed int32, but DT_INT32 is absent from the
* registered type list below — confirm which is intended.
*@par Attributes:
* @li paddings: A required "vector<vector<int>>".
* For each dimension D of input, paddings[D, 0] indicates how many
* values to add before the contents of tensor in that dimension,
* and paddings[D, 1] indicates how many values to add after the
* contents of tensor in that dimension.
* @li constant_values: An optional int value for pad. Defaults to 0.
* @li mode: An optional string, Defaults to "constant", indicates paddings mode,
* support "constant", "reflect", "edge"
* @li paddings_contiguous: An optional bool value, Defaults to true.
* If true, paddings is arranged as [[begin0, end0], [begin1, end1], ...]
* If false, paddings is arranged as [[begin0, begin1], ..., [end0, end1], ...]
*@par Outputs:
*y: A Tensor of the same type as "x".
*@par Third-party framework compatibility:
* Compatible with ONNX operator Pad.
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use PadV3 instead.
*/
REG_OP(PadV3D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8, DT_UINT8}))
    .REQUIRED_ATTR(paddings, ListListInt)
    .ATTR(constant_values, Int, 0)
    .ATTR(mode, String, "constant")
    .ATTR(paddings_contiguous, Bool, true)
    .OP_END_FACTORY_REG(PadV3D)
/**
*@brief Create a diagonal tensor.
*@par Inputs:
*Two inputs, including:
* @li x: A mutable Tensor. Must be one of the following types:
* float16, float32, int32 . \n
* @li assist: A mutable Tensor with rank k, where k is at most 1.
* Has the same type as "x" . \n
*@par Outputs:
*y: A mutable Tensor. Has the same type as "x" . \n
*@see Diag()
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Diag.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use Diag instead.
*/
REG_OP(DiagD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .INPUT(assist, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32}))
    .OP_END_FACTORY_REG(DiagD)
/**
*@brief Create a diagonal tensor.
*@par Inputs:
*One input, including:
* x: A mutable Tensor with rank k, where k is at most 1. Must be one of the
* following types:
* float16, float32, double, int32, int64, complex64, complex128 . \n
*@par Outputs:
*y: A mutable Tensor. Has the same type as "x" . \n
*@see DiagD()
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Diag.
*/
REG_OP(Diag)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                          DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32,
                           DT_INT64, DT_COMPLEX64, DT_COMPLEX128}))
    .OP_END_FACTORY_REG(Diag)
/**
*@brief Ascend padding: pads the last dimension of the input tensor.
*@par Inputs:
*One input, including:
*x: A Tensor whose last dimension must be 1. For example: [624000, 1] . \n
*@par Attributes:
*pad_dim_size: An optional int. The target size of the padded last dimension.
* Defaults to 8.
*@par Outputs:
*y: A Tensor with the last dimension of "x" padded to pad_dim_size.
* For example: [624000, pad_dim_size] . \n
*/
REG_OP(AscendPadding)
    .INPUT(x, TensorType::BasicType())
    .OUTPUT(y, TensorType::BasicType())
    .ATTR(pad_dim_size, Int, 8)
    .OP_END_FACTORY_REG(AscendPadding)
/**
*@brief EmbeddingRankId, traverse the index calculation server and its position in the server . \n
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please do not use. \n
*@par Inputs:
*Two inputs, including:
*@li addr_table: A Tensor of type uint64 whose last dimension must be 3. For example: [8, 3].
*@li index: A Tensor of type int32, int64 or uint64. For example: [640000].
*@par Attributes:
*@li row_memory: An optional int, the size of an embedding vector in a row. Defaults to 320.
*@li mode: An optional string. Defaults to "mod" (see EmbeddingLocalIndex for the
* 'mod'/'order' options — confirm the supported set here).
*@par Outputs:
*rank_id: A Tensor of type uint64; its first dimension matches the size of "index"
* and its last dimension must be 3. For example: [640000, 3].
*/
REG_OP(EmbeddingRankId)
    .INPUT(addr_table, TensorType({DT_UINT64}))
    .INPUT(index, TensorType({DT_INT64,DT_INT32,DT_UINT64}))
    .OUTPUT(rank_id, TensorType({DT_UINT64}))
    .ATTR(row_memory, Int, 320)
    .ATTR(mode, String, "mod")
    .OP_END_FACTORY_REG(EmbeddingRankId)
/**
*@brief EmbeddingLocalIndex, sort statistics index according to rank_id . \n
*@par Inputs:
* @li addr_table: A 2D tensor of type uint64 whose last dimension must be 3.
* @li index: A tensor with data type int32, int64, uint32, uint64.
*@par Attributes:
* @li row_memory: An optional int, the size of an embedding vector in a row.
* Defaults to 320.
* @li mode: An optional string; currently there are two options: 'mod' and 'order'.
* Defaults to "mod".
*@par Outputs:
* @li local_idx: Index on each server.
* @li nums: The number of local_idx found on each server.
* @li recover_idx: The sorted local_idx element is at the position corresponding
* to the original input index.
*/
REG_OP(EmbeddingLocalIndex)
    .INPUT(addr_table, TensorType({DT_UINT64}))
    .INPUT(index, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
    .OUTPUT(local_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
    .OUTPUT(nums, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
    .OUTPUT(recover_idx, TensorType({DT_INT64,DT_INT32,DT_UINT32,DT_UINT64}))
    .ATTR(row_memory, Int, 320)
    .ATTR(mode, String, "mod")
    .OP_END_FACTORY_REG(EmbeddingLocalIndex)
/**
* @brief Fill a tensor of the specified shape with a scalar value.
* @par Inputs:
* One input, including:
* @li dims: A Tensor of type int16, int32 or int64. Specifies the shape to fill.
* @par Attributes:
* @li value: An optional float value. Defaults to 0.0.
* @par Outputs:
* @li y: A Tensor with the shape specified by "dims", filled with "value".
* @par Third-party framework compatibility
* Compatible with the ONNX operator ConstantOfShape.
*/
REG_OP(FillV2)
    .INPUT(dims, TensorType({DT_INT16, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_INT16, DT_INT32, DT_INT64}))
    .ATTR(value, Float, 0)
    .OP_END_FACTORY_REG(FillV2)
/**
* @brief Fill a tensor of the specified shape with a scalar value.
* @par Attributes:
* @li value: An optional float value. Defaults to 0.0.
* @li dims: A required list of int, the shape to fill.
* @par Outputs:
* y: A Tensor with the shape specified by attribute "dims", filled with "value".
* (Unlike FillV2, the registered output types also include uint8.)
* @par Third-party framework compatibility
* Compatible with the ONNX operator ConstantOfShape.
*/
REG_OP(FillV2D)
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64}))
    .ATTR(value, Float, 0)
    .REQUIRED_ATTR(dims, ListInt)
    .OP_END_FACTORY_REG(FillV2D)
  438. } // namespace ge
  439. #endif // OPS_BUILT_IN_OP_PROTO_INC_PAD_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示