
nn_norm_ops.h

/**
* Copyright 2019-2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GE_OP_NN_NORM_OPS_H
#define GE_OP_NN_NORM_OPS_H

#include "graph/operator_reg.h"

namespace ge {
/**
*@brief Computes the gradient for log softmax activations.
*@par Inputs:
*@li grad: A Tensor. Must be one of the following types: float16, float32.
*@li x: A Tensor. Must be one of the following types: float16, float32.
*@par Attributes:
* axis: An optional list of ints. Defaults to "{-1}".
*@par Outputs:
* y: A Tensor. Has the same type as "grad".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator LogSoftmaxGrad.
*/
REG_OP(LogSoftmaxGrad)
    .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(axis, ListInt, {-1})
    .OP_END_FACTORY_REG(LogSoftmaxGrad)
REG_OP(SparseSoftmaxCrossEntropyWithLogitsCCE)
    .INPUT(features, TensorType{DT_FLOAT})
    .INPUT(labels, TensorType{DT_FLOAT})
    .OUTPUT(out, TensorType{DT_FLOAT})
    .OUTPUT(non, TensorType{DT_FLOAT})
    .ATTR(cross_entropy_is_grad, Bool, false)
    .ATTR(cross_entropy_mode, Int, 1)
    .ATTR(softmax_cross_entropy_lossscale_div_batch, Float, 1.0)
    .OP_END_FACTORY_REG(SparseSoftmaxCrossEntropyWithLogitsCCE)
/**
*@brief Computes sparse softmax cross entropy cost and gradients to backpropagate.
*@par Inputs:
*Two inputs, including:
* @li features: A Tensor. Must be one of the following types: float16, float32.
* A "batch_size * num_classes" matrix.
* @li labels: A Tensor of type int32 or int64. A "batch_size" vector with values in [0, num_classes).
*@par Outputs:
*loss: A Tensor for the per-example loss (a "batch_size" vector). Has the same type as "features".
*backprop: A Tensor for the backpropagated gradients (a "batch_size * num_classes" matrix). Has the same type as "features".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SparseSoftmaxCrossEntropyWithLogits.
*/
REG_OP(SparseSoftmaxCrossEntropyWithLogits)
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(labels, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(loss, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(backprop, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(SparseSoftmaxCrossEntropyWithLogits)
/**
*@brief Computes softmax cross entropy cost and gradients to backpropagate.
*@par Inputs:
*Two inputs, including:
* @li features: A Tensor. Must be one of the following types: half, float32, double.
* A "batch_size * num_classes" matrix.
* @li labels: A Tensor of the same type as "features". A "batch_size * num_classes" matrix.
*@par Outputs:
*loss: A Tensor for the per-example loss (a "batch_size" vector). Has the same type as "features".
*backprop: A Tensor for the backpropagated gradients (a "batch_size * num_classes" matrix). Has the same type as "features".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator SoftmaxCrossEntropyWithLogits.
*/
REG_OP(SoftmaxCrossEntropyWithLogits)
    .INPUT(features, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .INPUT(labels, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(loss, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(backprop, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(SoftmaxCrossEntropyWithLogits)
/**
*@brief Computes gradients for a softmax operation.
*@par Inputs:
* Two inputs, including:
* @li softmax: Output of the softmax operator. Must be one of the following
* types: float16, float32, int32, int8, uint8. The format is NC1HWC0 or DN.
* @li grad_softmax: A Tensor. Has the same shape and type as "softmax".
* The format is NC1HWC0 or DN.
*@par Outputs:
*grad_x: A Tensor. Has the same shape and type as "softmax".
*@par Third-party framework compatibility
* Compatible with TensorFlow operator SoftmaxGrad.
* (An illustrative sketch follows this registration.)
*/
REG_OP(SoftmaxGrad)
    .INPUT(softmax, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
    .INPUT(grad_softmax, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
    .OUTPUT(grad_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT32, DT_INT8, DT_UINT8}))
    .OP_END_FACTORY_REG(SoftmaxGrad)
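/*
 * Illustrative sketch (not part of the registry, and not necessarily the
 * device kernel): one standard formulation of the softmax backward pass over
 * a single row, dx = (dy - sum(dy * y)) * y, where y is the softmax output
 * and dy the incoming gradient. Float-only for brevity.
 *
 *     inline void SoftmaxGradRow(const float* y, const float* dy, float* dx, int n) {
 *       float dot = 0.0f;
 *       for (int i = 0; i < n; ++i) dot += dy[i] * y[i];  // sum(dy * y)
 *       for (int i = 0; i < n; ++i) dx[i] = (dy[i] - dot) * y[i];
 *     }
 */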
/**
*@brief Performs the backpropagation of SigmoidCrossEntropyWithLogits for training scenarios.
*@par Inputs:
* Three inputs, including: \n
*@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value.
*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value.
*@li dout: A multi-dimensional Tensor of type float16 or float32, specifying the gradient transferred from the upper layer.
*@par Outputs:
*gradient: Return gradient. Has the same dimensions and type as "predict".
*@par Third-party framework compatibility
* Compatible with the scenario where "reduction" is set to "none" of the PyTorch operator SigmoidCrossEntropyWithLogitsGrad.
*/
REG_OP(SigmoidCrossEntropyWithLogitsGrad)
    .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(dout, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogitsGrad)

/**
*@brief Computes the sigmoid cross entropy loss of "predict" and "target".
*@par Inputs:
* Two inputs, including: \n
*@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value.
*@li target: A multi-dimensional Tensor of type float16 or float32, specifying the target value.
*@par Outputs:
*loss: The sigmoid cross entropy between the predictive value and target value. Has the same dimensions as "predict".
*@par Third-party framework compatibility
* Compatible with the scenario where "reduction" is set to "none" of the PyTorch operator SigmoidCrossEntropyWithLogits.
* (An illustrative sketch follows this registration.)
*/
REG_OP(SigmoidCrossEntropyWithLogits)
    .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(target, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(loss, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(SigmoidCrossEntropyWithLogits)
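/*
 * Illustrative sketch (an assumption, not the device kernel): the standard
 * numerically stable elementwise form of sigmoid cross entropy with logits,
 * with x = predict and z = target:
 * loss = max(x, 0) - x * z + log(1 + exp(-|x|))
 *
 *     #include <algorithm>
 *     #include <cmath>
 *
 *     inline float SigmoidCrossEntropyElem(float x, float z) {
 *       return std::max(x, 0.0f) - x * z + std::log1p(std::exp(-std::fabs(x)));
 *     }
 */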
/**
*@brief Computes the regression box of the RPN. It is a FasterRCNN operator.
*@par Inputs:
* Two inputs, including: \n
*@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value.
*@li label: A multi-dimensional Tensor of type float16 or float32, specifying the target value.
*@par Attributes:
* sigma: Must be a floating point number. Defaults to "1.0".
*@par Outputs:
*loss: Indicates the loss between the predictive value and target value. Has the same dimensions as "predict".
*@attention Constraints:
* This operator does not perform the "reduce" operation on the loss value. Call other reduce operators to perform the "reduce" operation on the loss if required.
*@par Third-party framework compatibility
* Compatible with the scenario where "reduction" is set to "none" of the PyTorch operator SmoothL1Loss.
* (An illustrative sketch follows this registration.)
*/
REG_OP(SmoothL1Loss)
    .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(loss, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(sigma, Float, 1.0)
    .OP_END_FACTORY_REG(SmoothL1Loss)
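/*
 * Illustrative per-element sketch using the Faster R-CNN convention for
 * "sigma" (an assumption; the doc above does not spell out the formula):
 * with s = sigma^2 and d = predict - label,
 * loss(d) = 0.5 * s * d^2 if |d| < 1/s, else |d| - 0.5/s.
 * No reduction is applied, matching the constraint above.
 *
 *     #include <cmath>
 *
 *     inline float SmoothL1Elem(float predict, float label, float sigma) {
 *       const float d = predict - label;
 *       const float s = sigma * sigma;
 *       const float ad = std::fabs(d);
 *       return ad < 1.0f / s ? 0.5f * s * d * d : ad - 0.5f / s;
 *     }
 */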
/**
*@brief Performs the backpropagation of SmoothL1Loss for training scenarios.
*@par Inputs:
* Three inputs, including: \n
*@li predict: A multi-dimensional Tensor of type float16 or float32, specifying the predictive value.
*@li label: A multi-dimensional Tensor of type float16 or float32, specifying the target value.
*@li dout: A multi-dimensional Tensor of type float16 or float32, specifying the gradient transferred from the upper layer.
*@par Attributes:
* sigma: Must be a floating point number. Defaults to "1.0".
*@par Outputs:
*gradient: Return gradient. Has the same dimensions and type as "predict".
*@par Third-party framework compatibility
* Compatible with the scenario where "reduction" is set to "none" of the PyTorch operator SmoothL1LossGrad.
*/
REG_OP(SmoothL1LossGrad)
    .INPUT(predict, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(label, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(dout, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(gradient, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(sigma, Float, 1.0)
    .OP_END_FACTORY_REG(SmoothL1LossGrad)
/**
*@brief Creates a criterion that measures the binary cross entropy between the target and the output.
*@par Inputs:
* Three inputs, including: \n
*@li x: A 1D or 2D Tensor of type float16 or float32, specifying a predictive value.
*@li y: A 1D or 2D Tensor of type float16 or float32, indicating a tag.
*@li weight: An optional 1D or 2D Tensor, specifying the weight.
*@par Attributes:
*reduction: A character string from "none", "mean", and "sum", specifying the reduction type to be applied to the output. Defaults to "mean".
*@par Outputs:
*output: The output loss. When "reduction" is set to "none", a Tensor with the same size as "x" is output. Otherwise, a Scalar is output.
*@attention Constraints:
*@li The value of "x" must range from 0 to 1.
*@li The value of "y" must be "0" or "1".
*@par Third-party framework compatibility
* Compatible with PyTorch operator BCELoss.
* (An illustrative sketch follows this registration.)
*/
REG_OP(BinaryCrossEntropy)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OPTIONAL_INPUT(weight, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(reduction, String, "mean")
    .OP_END_FACTORY_REG(BinaryCrossEntropy)
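/*
 * Illustrative sketch of the standard elementwise BCE term and the documented
 * "reduction" modes (clamping and edge-case handling of the real kernel are
 * omitted; this is a reference formulation, not the device implementation):
 * l_i = -w_i * (y_i * log(x_i) + (1 - y_i) * log(1 - x_i));
 * "none" returns l, "sum" returns sum(l), "mean" returns sum(l) / N.
 *
 *     #include <cmath>
 *
 *     inline float BceElem(float x, float y, float w) {
 *       return -w * (y * std::log(x) + (1.0f - y) * std::log(1.0f - x));
 *     }
 */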
/**
*@brief Performs the backpropagation of BinaryCrossEntropy for training scenarios.
*@par Inputs:
* Four inputs, including: \n
*@li x: A 1D or 2D Tensor of type float16 or float32, specifying a predictive value.
*@li y: A 1D or 2D Tensor of type float16 or float32, indicating a tag.
*@li grad_output: A 1D or 2D Tensor of type float16 or float32, specifying the backpropagation gradient.
*@li weight: An optional 1D or 2D Tensor, specifying the weight.
*@par Attributes:
*reduction: A character string from "none", "mean", and "sum", specifying the gradient output mode. Defaults to "mean".
*@par Outputs:
*output: A 1D or 2D Tensor. When "reduction" is set to "none", a Tensor with the same size as "x" is output. Otherwise, a Scalar is output.
*@attention Constraints:
*@li The value of "x" must range from 0 to 1.
*@li The value of "y" must be "0" or "1".
*@par Third-party framework compatibility
* Compatible with PyTorch operator BCELossGrad.
*/
REG_OP(BinaryCrossEntropyGrad)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(grad_output, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OPTIONAL_INPUT(weight, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(output, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(reduction, String, "mean")
    .OP_END_FACTORY_REG(BinaryCrossEntropyGrad)
/**
*@brief Applies the Softmax function to an n-dimensional input Tensor,
* rescaling it so that the elements of the n-dimensional output Tensor lie
* in the range [0, 1] and sum to 1.
*@par Inputs:
*One input:
*x: A mutable Tensor. Must be one of the following types: float16, float32,
* double. Should be a Variable Tensor.
*@par Attributes:
*axes: A list of ints. The dimension softmax would be performed on. Defaults
* to "{-1}".
*@par Outputs:
*y: A Tensor. Has the same dimensionality and shape as "x", with values in
* the range [0, 1]. Must be one of the following types: float16, float32,
* double.
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Softmax.
* (An illustrative sketch follows this registration.)
*/
REG_OP(SoftmaxV2)
    .INPUT(x, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .ATTR(axes, ListInt, {-1})
    .OP_END_FACTORY_REG(SoftmaxV2)
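/*
 * Illustrative sketch of the last-axis softmax described above (row-major,
 * float32, max-subtracted for numerical stability; the device kernel and the
 * general "axes" handling are more involved than this reference loop):
 *
 *     #include <algorithm>
 *     #include <cmath>
 *
 *     inline void SoftmaxRow(const float* x, float* y, int n) {
 *       const float m = *std::max_element(x, x + n);
 *       float sum = 0.0f;
 *       for (int i = 0; i < n; ++i) { y[i] = std::exp(x[i] - m); sum += y[i]; }
 *       for (int i = 0; i < n; ++i) y[i] /= sum;  // now in [0, 1], sums to 1
 *     }
 */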
/**
*@brief Computes log softmax activations.
*@par Inputs:
*One input:
* logits: A Tensor. Must be one of the following types: double, float16, float32.
*@par Attributes:
* axes: An optional list of ints. Defaults to "{-1}".
*@par Outputs:
* logsoftmax: A Tensor. Has the same type as "logits".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator LogSoftmax.
*/
REG_OP(LogSoftmaxV2)
    .INPUT(logits, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(logsoftmax, TensorType({DT_DOUBLE, DT_FLOAT16, DT_FLOAT}))
    .ATTR(axes, ListInt, {-1})
    .OP_END_FACTORY_REG(LogSoftmaxV2)
REG_OP(FusedBatchNormV2)
    .INPUT(x, TensorType{DT_FLOAT})                  /* Input data tensor from the previous operator */
    .INPUT(scale, TensorType{DT_FLOAT})              /* If spatial is true, the dimension of scale is (C). If spatial is false, the dimensions of scale are (C x D1 x ... x Dn) */
    .INPUT(b, TensorType{DT_FLOAT})                  /* If spatial is true, the dimension of bias is (C). If spatial is false, the dimensions of bias are (C x D1 x ... x Dn) */
    .OPTIONAL_INPUT(mean, TensorType{DT_FLOAT})      /* If spatial is true, the dimension of the running mean (training) or the estimated mean (testing) is (C). If spatial is false, the dimensions of the running mean (training) or the estimated mean (testing) are (C x D1 x ... x Dn) */
    .OPTIONAL_INPUT(variance, TensorType{DT_FLOAT})  /* If spatial is true, the dimension of the running variance (training) or the estimated variance (testing) is (C). If spatial is false, the dimensions of the running variance (training) or the estimated variance (testing) are (C x D1 x ... x Dn) */
    .OUTPUT(y, TensorType{DT_FLOAT})                 /* The output tensor of the same shape as X */
    .ATTR(momentum, Float, 0.9)    // Factor used in computing the running mean and variance.
    .ATTR(epsilon, Float, 1e-5f)   // The epsilon value to use to avoid division by zero.
    .ATTR(mode, Int, 1)            // 1 means using "CC_BATCHNORM_SPATIAL"; 0 means using "CC_BATCHNORM_PER_ACTIVATION"; only 1 is supported now.
    .ATTR(use_global_stats, Bool, true)
    .ATTR(alpha, Float, 1)
    .ATTR(beta, Float, 0)
    .OP_END_FACTORY_REG(FusedBatchNormV2)
/**
*@brief Fuses the mul, sum, and sub operations.
*@par Inputs:
*Two inputs, including:
* @li grad: A Tensor. Must be one of the following types: float16, float32.
* @li x: A Tensor. Must be one of the following types: float16, float32.
*@par Outputs:
* y: A Tensor of the same type as "grad".
*/
REG_OP(ConfusionSoftmaxGrad)
    .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(ConfusionSoftmaxGrad)
REG_OP(SoftmaxGradExt)
    .INPUT(grad, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(axes, Int, 1)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(SoftmaxGradExt)
/**
*@brief Normalizes the input.
*@par Inputs:
* One input:
*x: An NCHW tensor of type float16 or float32.
*@par Attributes:
*@li normalize_variance: An optional bool specifying whether to normalize the variance, either "true" (default) or "false".
* The value "false" indicates to subtract the mean only.
*@li across_channels: An optional bool specifying whether to perform across-channel MVN, either "true" or "false" (default).
* The value "true" indicates that "CHW" is treated as a vector.
*@li eps: An optional float32 epsilon to avoid division by zero. Defaults to "1e-9".
*@par Outputs:
*y: An NCHW tensor of type float16 or float32.
*@attention Constraints:\n
* The input tensor must have the NCHW format, whose shape length must be 4.
*@par Third-party framework compatibility
* Compatible with the Caffe operator MVN.
*/
REG_OP(MVN)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))  /* "First operand." */
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16})) /* "Result, has same element type as inputs" */
    .ATTR(normalize_variance, Bool, true)
    .ATTR(across_channels, Bool, false)
    .ATTR(eps, Float, 1e-9)
    .OP_END_FACTORY_REG(MVN)
/**
*@brief Normalizes the input "x1".
*@par Inputs:
* Two inputs, including:
*@li x1: A required NCHW or NHWC tensor of type float32, float16, or int8.
*@li x2: A required ND tensor of type float32, float16, or int8, specifying
* the scaling factor. If "channel_shared" is "true", "x2" is a [1]-dimensional
* vector. If "channel_shared" is "false", "x2" is a [C]-dimensional vector.
*@par Attributes:
*@li across_spatial: An optional bool, specifying the dimension of input "x1"
* to be summed. The value "true" (default) indicates dimensions C, H, W, and
* the value "false" indicates dimension C.
*@li channel_shared: An optional bool, specifying the dimension count of input
* "x2". The value "true" (default) indicates 1, and the value "false" indicates
* dimension C of "x1".
*@li eps: An optional float32, specifying the bias when "across_spatial" is
* "true". Defaults to "1e-10".
*@par Outputs:
*y: A Tensor. Has the same type and format as "x1".
*@par Third-party framework compatibility
* Compatible with the Caffe operator Normalize.
*/
REG_OP(Normalize)
    .INPUT(x1, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
    .INPUT(x2, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_INT8}))
    .ATTR(across_spatial, Bool, true)
    .ATTR(channel_shared, Bool, true)
    .ATTR(eps, Float, 1e-10)
    .OP_END_FACTORY_REG(Normalize)
/**
*@brief LayerNorm operator interface implementation.
* Calculates the following, given x, gamma, and beta:
* mean = np.mean(x, reduce_axis, keepdims=True)
* variance = np.mean(np.power((x - mean), 2), reduce_axis, keepdims=True)
* y = gamma * ((x - mean) / np.sqrt(variance + epsilon)) + beta
*@par Inputs:
*Three inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32.
* @li gamma: A Tensor. Must be one of the following types: float16, float32.
* @li beta: A Tensor. Must be one of the following types: float16, float32.
*@par Attributes:
* @li begin_norm_axis: An optional attribute, the type is int32. Defaults to 0.
* @li begin_params_axis: An optional attribute, the type is int32. Defaults to 0.
* @li epsilon: An optional attribute, the type is float32. Defaults to 1e-7.
*@par Outputs:
*Three outputs, including:
* @li y: A Tensor. Must be one of the following types: float16, float32.
* @li mean: A Tensor. Must be one of the following types: float16, float32.
* @li variance: A Tensor. Must be one of the following types: float16, float32.
* (An illustrative sketch follows this registration.)
*/
REG_OP(LayerNorm)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(begin_norm_axis, Int, 0)
    .ATTR(begin_params_axis, Int, 0)
    .ATTR(epsilon, Float, 0.0000001)
    .OP_END_FACTORY_REG(LayerNorm)
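/*
 * Illustrative sketch of the formula documented above, applied over the last
 * axis of one row (an assumption; begin_norm_axis/begin_params_axis generalize
 * which axes are reduced and where gamma/beta apply):
 *
 *     #include <cmath>
 *
 *     inline void LayerNormRow(const float* x, const float* gamma,
 *                              const float* beta, float* y, int n,
 *                              float epsilon) {
 *       float mean = 0.0f, variance = 0.0f;
 *       for (int i = 0; i < n; ++i) mean += x[i];
 *       mean /= n;
 *       for (int i = 0; i < n; ++i) variance += (x[i] - mean) * (x[i] - mean);
 *       variance /= n;
 *       const float inv_std = 1.0f / std::sqrt(variance + epsilon);
 *       for (int i = 0; i < n; ++i)
 *         y[i] = gamma[i] * (x[i] - mean) * inv_std + beta[i];
 *     }
 */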
/**
*@brief LayerNormGrad operator interface implementation.
* Calculates the following, given dy, x, variance, mean, and gamma:
* pd_xl = data_dy * data_gamma
* pd_var = np.sum(((-0.5) * pd_xl * (data_x - data_mean)
*          * np.power((data_variance + EPSLON), (-1.5))),
*          reduce_axis, keepdims=True)
* pd_mean = np.sum(((-1.0) * pd_xl
*           * np.power((data_variance + EPSLON), (-0.5))),
*           reduce_axis, keepdims=True)
*           + pd_var * (1.0/m)
*           * np.sum(((-2.0) * (data_x - data_mean)), reduce_axis, keepdims=True)
* pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5)) +
*        pd_var * (2.0/m) * (data_x - data_mean) + pd_mean * (1.0/m)
* pd_gamma = np.sum((data_dy * (data_x - data_mean)
*            * np.power((data_variance + EPSLON), (-0.5))), param_axis, keepdims=True)
* pd_beta = np.sum(data_dy, param_axis, keepdims=True)
*@par Inputs:
*Five inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32.
* @li x: A Tensor. Must be one of the following types: float16, float32.
* @li variance: A Tensor. Must be one of the following types: float16, float32.
* @li mean: A Tensor. Must be one of the following types: float16, float32.
* @li gamma: A Tensor. Must be one of the following types: float16, float32.
*@par Outputs:
*Three outputs, including:
* @li pd_x: A Tensor. Must be one of the following types: float16, float32.
* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32.
* @li pd_beta: A Tensor. Must be one of the following types: float16, float32.
*/
REG_OP(LayerNormGrad)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(LayerNormGrad)
/**
*@brief LayerNormXBackprop operator interface implementation.
* Calculates the following, given dy, x, variance, mean, and gamma:
* pd_xl = data_dy * data_gamma
* pd_var = np.sum(((-0.5) * pd_xl * (data_x - data_mean)
*          * np.power((data_variance + EPSLON), (-1.5))),
*          reduce_axis, keepdims=True)
* pd_mean = np.sum(((-1.0) * pd_xl
*           * np.power((data_variance + EPSLON), (-0.5))),
*           reduce_axis, keepdims=True)
*           + pd_var * (1.0/m)
*           * np.sum(((-2.0) * (data_x - data_mean)), reduce_axis, keepdims=True)
* pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5)) +
*        pd_var * (2.0/m) * (data_x - data_mean) + pd_mean * (1.0/m)
* pd_gamma = np.sum((data_dy * (data_x - data_mean)
*            * np.power((data_variance + EPSLON), (-0.5))), param_axis, keepdims=True)
* pd_beta = np.sum(data_dy, param_axis, keepdims=True)
*@par Inputs:
*Five inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32.
* @li x: A Tensor. Must be one of the following types: float16, float32.
* @li variance: A Tensor. Must be one of the following types: float16, float32.
* @li mean: A Tensor. Must be one of the following types: float16, float32.
* @li gamma: A Tensor. Must be one of the following types: float16, float32.
*@par Outputs:
*One output:
* @li pd_x: A Tensor. Must be one of the following types: float16, float32.
*/
REG_OP(LayerNormXBackprop)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(LayerNormXBackprop)
/**
*@brief LayerNormBetaGammaBackprop operator interface implementation.
* Calculates the following, given dy, x, variance, and mean:
* pd_xl = data_dy * data_gamma
* pd_var = np.sum(((-0.5) * pd_xl * (data_x - data_mean)
*          * np.power((data_variance + EPSLON), (-1.5))),
*          reduce_axis, keepdims=True)
* pd_mean = np.sum(((-1.0) * pd_xl
*           * np.power((data_variance + EPSLON), (-0.5))),
*           reduce_axis, keepdims=True)
*           + pd_var * (1.0/m)
*           * np.sum(((-2.0) * (data_x - data_mean)), reduce_axis, keepdims=True)
* pd_x = pd_xl * np.power((data_variance + EPSLON), (-0.5)) +
*        pd_var * (2.0/m) * (data_x - data_mean) + pd_mean * (1.0/m)
* pd_gamma = np.sum((data_dy * (data_x - data_mean)
*            * np.power((data_variance + EPSLON), (-0.5))), param_axis, keepdims=True)
* pd_beta = np.sum(data_dy, param_axis, keepdims=True)
*@par Inputs:
*Four inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32.
* @li x: A Tensor. Must be one of the following types: float16, float32.
* @li variance: A Tensor. Must be one of the following types: float16, float32.
* @li mean: A Tensor. Must be one of the following types: float16, float32.
*@par Attributes:
* shape_gamma: A required attribute, the type is ListInt.
*@par Outputs:
*Two outputs, including:
* @li pd_gamma: A Tensor. Must be one of the following types: float16, float32.
* @li pd_beta: A Tensor. Must be one of the following types: float16, float32.
*/
REG_OP(LayerNormBetaGammaBackprop)
    .INPUT(dy, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(variance, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_gamma, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(pd_beta, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(shape_gamma, ListInt)
    .OP_END_FACTORY_REG(LayerNormBetaGammaBackprop)
/**
*@brief Returns "output" according to the algorithm of dropout_do_mask: \n
* scale_x = x * (1 / keep_prob)
* output = select(mask == 1, scale_x, 0)
*@par Inputs:
*Three inputs, including: \n
* @li x: A mutable Tensor. Must be one of the following types:
* float16, float32.
* @li mask: A mutable Tensor. Must meet all of the following rules:
* the shape of "mask" should be 1D;
* the dtype of "mask" should be uint8;
* the value of the shape should satisfy the following algorithm:
* value = (size(x) + 128 - 1) // 128 * 128 // 8.
* @li keep_prob: A mutable Tensor. Must meet all of the following rules:
* the shape of "keep_prob" should be (1,) or [1,];
* has the same type as "x".
*@par Outputs:
*y: A mutable Tensor. Has the same type as "x".
* (An illustrative sketch follows this registration.)
*/
REG_OP(DropOutDoMask)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mask, TensorType({DT_UINT8}))
    .INPUT(keep_prob, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(DropOutDoMask)
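/*
 * Illustrative sketch (assumptions, not the kernel itself): the documented
 * mask sizing rounds the element count of "x" up to a multiple of 128 bits
 * and converts to bytes, and the apply step scales kept elements by
 * 1 / keep_prob. The one-bit-per-element reading of the uint8 mask below is
 * an assumption about the layout.
 *
 *     #include <cstddef>
 *     #include <cstdint>
 *
 *     // Bytes of mask required for n elements: round n up to 128, divide by 8.
 *     inline size_t DropoutMaskBytes(size_t n) {
 *       return (n + 128 - 1) / 128 * 128 / 8;
 *     }
 *
 *     inline void DropoutDoMaskRef(const float* x, const uint8_t* mask,
 *                                  float keep_prob, float* y, size_t n) {
 *       const float scale = 1.0f / keep_prob;
 *       for (size_t i = 0; i < n; ++i) {
 *         const bool keep = (mask[i / 8] >> (i % 8)) & 1;  // one bit per element
 *         y[i] = keep ? x[i] * scale : 0.0f;
 *       }
 *     }
 */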
/**
*@brief Scales the input.
*@par Inputs:
* Three inputs, including:
*@li x: An ND tensor of type float16 or float32.
*@li scale: An ND tensor of type float16 or float32.
*@li bias: An ND tensor of type float16 or float32.
*@par Attributes:
*@li axis: An optional int32 used to compute the shape of the scale and bias input from the online bottoms. Defaults to "1".
*@li num_axes: An optional int32 used to compute the shape of the scale and bias input from a Caffe model trained offline. Defaults to "1".
*@li scale_from_blob: An optional bool. If "true", scale and bias are input from a Caffe model trained offline. If "false", scale and bias are input from online bottoms. Defaults to "true".
*@par Outputs:
*y: An ND tensor of type float16 or float32.
*@attention Constraints:\n
* Assume that the shape length of "x" is "n" and that of "scale" is "m".
*@li "axis" is within the range [-n, n-1]. "num_axes" >= -1.
*@li If "scale_from_blob = true", "num_axes = -1", and "axis >= 0", the ith axis of "scale" and the (i+"axis")th axis of "x" must have the same size (0 <= i < n-axis).\n
* If "axis < 0", the ith axis of "scale" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < -axis).
*@li If "scale_from_blob = true" and "num_axes = 0", "scale" is a scalar with shape length 1 and dimension size 1.
*@li If "scale_from_blob = true", "num_axes > 0", and "axis >= 0", "axis + num_axes" must be less than or equal to "n", and the ith axis of "scale" and the (i+"axis")th axis of "x" must have the same size (0 <= i < num_axes).\n
* If "axis < 0", "n + axis + num_axes" must be less than or equal to "n", and the ith axis of "scale" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < num_axes).
*@li If "scale_from_blob = false", "scale" is not a scalar, and "axis >= 0", "axis + m" must be less than or equal to "n", and the ith axis of "scale" and the (i+"axis")th axis of "x" must have the same size (0 <= i < m).\n
* If "axis < 0", "n + axis + m" must be less than or equal to "n", and the ith axis of "scale" and the (i+n+"axis")th axis of "x" must have the same size (0 <= i < m).
*@li If "bias" is not None, the constraints for "bias" are the same as those for "scale".
* (A worked shape example follows this registration.)
*@par Third-party framework compatibility
* Compatible with the Caffe operator Scale.
*/
REG_OP(Scale)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))              /* "First operand." */
    .INPUT(scale, TensorType({DT_FLOAT, DT_FLOAT16}))          /* "Second operand." */
    .OPTIONAL_INPUT(bias, TensorType({DT_FLOAT, DT_FLOAT16}))  /* "Third operand." */
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))             /* "Result, has same element type as x" */
    .ATTR(axis, Int, 1)
    .ATTR(num_axes, Int, 1)
    .ATTR(scale_from_blob, Bool, true)
    .OP_END_FACTORY_REG(Scale)
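/*
 * Worked shape example under the constraints above (hypothetical values, for
 * illustration only): take "x" of shape (2, 3, 4, 5), so n = 4, with
 * "scale_from_blob = true", "axis = 1", and "num_axes = 2". Then
 * axis + num_axes = 3 <= n, and dim i of "scale" must match dim (i + axis) of
 * "x" for 0 <= i < num_axes, so "scale" must have shape (3, 4). With
 * "num_axes = -1" instead, "scale" would have to match all trailing axes from
 * "axis" on, i.e. shape (3, 4, 5).
 */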
/**
*@brief Local Response Normalization.
*@par Inputs:
*One input, including:
*@li x: A Tensor. Must have a 4-D shape, and must be one of the following types: float16, float32.
*@par Attributes:
*@li depth_radius: An optional int32, specifying the half-width of the normalization window. Defaults to "5".
* Under the Caffe framework, if "local_size" is provided and is an odd number,
* depth_radius = (local_size - 1) / 2. "local_size" is the number of channels to sum over (for ACROSS_CHANNELS)
* or the side length of the square region to sum over (for WITHIN_CHANNEL).
*@li bias: An optional float32. An offset, usually greater than 0 to avoid dividing by 0.
* Defaults to "1".
*@li alpha: An optional float32. A scaling factor, usually positive.
* Defaults to "1".
*@li beta: An optional float32. An exponent. Defaults to "0.75" for the Caffe framework and to "0.5" for others.
*@li norm_region: An optional string. A mode option: "ACROSS_CHANNELS": 0, "WITHIN_CHANNEL": 1. Defaults to "ACROSS_CHANNELS".
*@par Outputs:
*y: A Tensor. Has the same data type and shape as "x".
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator LRN.
* (An illustrative sketch follows this registration.)
*/
REG_OP(LRN)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(depth_radius, Int, 5)
    .ATTR(bias, Float, 1.0)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.5)
    .ATTR(norm_region, String, "ACROSS_CHANNELS")
    .OP_END_FACTORY_REG(LRN)
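/*
 * Illustrative sketch of the TensorFlow-style ACROSS_CHANNELS formula the LRN
 * doc refers to, applied to one channel vector at a time (not the device
 * kernel): y[c] = x[c] / pow(bias + alpha * sum(x[c-r .. c+r]^2), beta),
 * with r = depth_radius and the window clipped at the channel boundaries.
 *
 *     #include <algorithm>
 *     #include <cmath>
 *
 *     inline void LrnAcrossChannels(const float* x, float* y, int channels,
 *                                   int r, float bias, float alpha, float beta) {
 *       for (int c = 0; c < channels; ++c) {
 *         float sqr_sum = 0.0f;
 *         const int lo = std::max(0, c - r);
 *         const int hi = std::min(channels - 1, c + r);
 *         for (int i = lo; i <= hi; ++i) sqr_sum += x[i] * x[i];
 *         y[c] = x[c] / std::pow(bias + alpha * sqr_sum, beta);
 *       }
 *     }
 */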
/**
* @brief Computes the gradient for Local Response Normalization.
* @par Inputs:
* @li grads: A 4D Tensor of type float16 or float32.
* @li x: A 4D Tensor of type float16 or float32.
* @li y: A 4D Tensor of type float16 or float32.
* @par Attributes:
* @li depth_radius: An optional int, specifying the half-width of the
* normalization window. Defaults to "5".
* @li bias: An optional float32. An offset, usually greater than 0 to avoid dividing by 0.
* Defaults to "1".
* @li alpha: An optional float32. A scaling factor, usually positive.
* Defaults to "1".
* @li beta: An optional float32. An exponent. Defaults to "0.5".
* @par Outputs:
* z: A Tensor. Has the same type and shape as "grads".
* @attention Constraints:
* "x" and "y" must have the same shape and type as "grads".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator LRNGrad.
*/
REG_OP(LRNGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(depth_radius, Int, 5)
    .ATTR(bias, Float, 1.0)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.5)
    .OP_END_FACTORY_REG(LRNGrad)
/**
*@brief Calculates the RNNT Loss (log probability) for each batch entry. \n
Also calculates the gradient.
*@par Inputs:
*@li acts: 4-D, shape: `(batch x seqLength x labelLength x outputDim)`, the logits.
*@li labels: 2-D Tensor containing all the targets of the batch, zero-padded.
*@li input_lengths: Tensor of size (batch) containing the size of each output sequence.
*@li label_lengths: Tensor of size (batch) containing the label length of each example.
*@par Outputs:
*@li costs: 1-D Tensor, the cost of each example in the batch.
*@li grads: A Tensor. Has the same type as "acts".
*@par Attributes:
*@li blank_label: An optional attribute. Defaults to 0.
*@par Third-party framework compatibility
* Compatible with the TensorFlow RNNTLoss operator.
*/
REG_OP(RNNTLoss)
    .INPUT(acts, TensorType({DT_FLOAT}))
    .INPUT(labels, TensorType({DT_INT32}))
    .INPUT(input_lengths, TensorType({DT_INT32}))
    .INPUT(label_lengths, TensorType({DT_INT32}))
    .ATTR(blank_label, Int, 0)
    .OUTPUT(costs, TensorType({DT_FLOAT}))
    .OUTPUT(grads, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(RNNTLoss)
/**
*@brief Performs group normalization.
*@par Inputs:\n
* Five inputs, including: (NHWC, NCHW supported)
*@li x: A 4D Tensor of type float16 or float32, with format NHWC or \n
NCHW for 4D.
*@li scale: A Tensor of type float32. Must be 1D if input "x" is with format \n
NHWC or NCHW. Specifies the scaling factor.
*@li offset: A Tensor of type float32. Must be 1D if input "x" is with \n
format NHWC or NCHW. Specifies the offset.
*@li mean: A Tensor of type float32. Must be 1D if input "x" is with format \n
NHWC or NCHW. Reserved. Must be "None" if the operation is used for training.
*@li variance: A Tensor of type float32. Must be 1D if input "x" is with \n
format NHWC or NCHW. Specifies the variance used for inference. Reserved.
*@par Attributes:
*@li epsilon: An optional float32, specifying the small value added to \n
variance to avoid dividing by zero. Defaults to "0.0001".
*@li data_format: An optional string, specifying the format of "x". \n
Defaults to "NHWC".
*@li is_training: An optional bool, specifying if the operation is used for \n
training or inference. Defaults to "True".
*@li num_groups: An optional int32, specifying the number of groups that the \n
channels are divided into. Defaults to "2".
*@par Outputs:\n
* Five outputs, including: (NHWC, NCHW supported)
*@li y: A 4D Tensor of type float16 or float32 for the normalized "x", \n
with format NHWC or NCHW for 4D.
*@li batch_mean: A Tensor of type float32. Must be 1D if input "x" is with \n
format NHWC or NCHW. Specifies the mean of "x".
*@li batch_variance: A Tensor of type float32. Must be 1D if input "x" is \n
with format NHWC or NCHW. Specifies the variance of "x".
*@li reserve_space_1: An optional Tensor of type float32. Must be 1D if \n
input "x" is with format NHWC or NCHW. Specifies the mean of "x" for \n
gradient computation. Pass "None" to skip this output.
*@li reserve_space_2: An optional Tensor of type float32. Must be 1D if \n
input "x" is with format NHWC or NCHW. Specifies the variance of "x" for \n
gradient computation. Pass "None" to skip this output.
*@attention Constraints:
*@li If the operation is used for inference and outputs "reserve_space_1" \n
and "reserve_space_2" are available, then "reserve_space_1" has the same \n
value as "mean" and "reserve_space_2" has the same value as "variance".
*@li For Ascend 310, the result accuracy fails due to the square root \n
instruction.
*@par Third-party framework compatibility
*@li Compatible with the PyTorch operator GroupNorm.
* (An illustrative sketch follows this registration.)
*/
REG_OP(GroupNorm)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(reserve_space_1, TensorType({DT_FLOAT}))
    .OUTPUT(reserve_space_2, TensorType({DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .ATTR(data_format, String, "NHWC")
    .ATTR(is_training, Bool, true)
    .ATTR(num_groups, Int, 2)
    .OP_END_FACTORY_REG(GroupNorm)
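/*
 * Illustrative sketch of the GroupNorm statistics for one sample (assumptions:
 * NCHW layout, float32, C divisible by num_groups; the registered op also
 * applies scale/offset and supports NHWC, which this sketch omits):
 *
 *     #include <cmath>
 *     #include <cstddef>
 *
 *     // Normalizes each of the num_groups channel groups of one sample.
 *     inline void GroupNormRef(const float* x, float* y, int C, int HW,
 *                              int num_groups, float epsilon) {
 *       const int cg = C / num_groups;  // channels per group
 *       const size_t group_size = static_cast<size_t>(cg) * HW;
 *       for (int g = 0; g < num_groups; ++g) {
 *         const float* xg = x + static_cast<size_t>(g) * group_size;
 *         float* yg = y + static_cast<size_t>(g) * group_size;
 *         float mean = 0.0f, var = 0.0f;
 *         for (size_t i = 0; i < group_size; ++i) mean += xg[i];
 *         mean /= group_size;
 *         for (size_t i = 0; i < group_size; ++i) {
 *           const float d = xg[i] - mean;
 *           var += d * d;
 *         }
 *         var /= group_size;
 *         const float inv_std = 1.0f / std::sqrt(var + epsilon);
 *         for (size_t i = 0; i < group_size; ++i)
 *           yg[i] = (xg[i] - mean) * inv_std;
 *       }
 *     }
 */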
}  // namespace ge

#endif  // GE_OP_NN_NORM_OPS_H

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, acting as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph-optimization passes, and outputs a graph that runs efficiently on the underlying hardware. GE is specifically optimized for the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts: the GE API and GE Core.