/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file reduce_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_REDUCE_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_REDUCE_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Performs reduced batch normalization . \n
*@par Inputs:
*x: A tensor of type float16 or float32. \n
*@par Outputs:
*@li sum: A 1D Tensor of type float32 for SUM reduced "x".
*@li square_sum: A 1D Tensor of type float32 for SUMSQ reduced "x" . \n
*@attention Constraints:
* This operator is a BatchNorm fusion operator for updating the moving
* averages for training.
* This operator is used in conjunction with BNTrainingUpdate.
*/
REG_OP(BNTrainingReduce)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(sum, TensorType({DT_FLOAT}))
    .OUTPUT(square_sum, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingReduce)
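
/* Illustrative sketch (added example, not part of the original interface):
 * for a flattened input x = {1, 2, 3, 4}, BNTrainingReduce would produce
 *   sum        = 1 + 2 + 3 + 4  = 10
 *   square_sum = 1 + 4 + 9 + 16 = 30
 * i.e. the SUM and SUMSQ statistics consumed by BNTrainingUpdate.
 */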

/**
*@brief Performs reduced batch normalization . \n
*@par Inputs:
*x: A tensor of type float16 or float32. \n
*@par Outputs:
*@li sum: A tensor of type float32 for SUM reduced "x".
*@li square_sum: A tensor of type float32 for SUMSQ reduced "x" . \n
*@attention Constraints:
* This operator is a BatchNorm fusion operator for updating the moving
* averages for training.
* This operator is used in conjunction with BN3DTrainingUpdate.
*/
REG_OP(BN3DTrainingReduce)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(sum, TensorType({DT_FLOAT}))
    .OUTPUT(square_sum, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BN3DTrainingReduce)

/**
*@brief Performs the backpropagation of BatchNorm . \n
*@par Inputs:
* Seven inputs, including:
*@li grads: A tensor of type float16 or float32, for
* the gradient.
*@li x: A tensor of type float16 or float32.
*@li diff_scale: A tensor of type float32,
* an output of BNTrainingUpdateGrad.
*@li diff_offset: A tensor of type float32,
* an output of BNTrainingUpdateGrad.
*@li scale: A tensor of type float32.
*@li batch_mean: A tensor of type float32,
* for the mean of "x".
*@li batch_variance: A tensor of type float32,
* for the variance of "x" . \n
*@par Attributes:
*epsilon: An optional float32. Defaults to "0.0001". A small float number
* added to the variance of "x" . \n
*@par Outputs:
*y: A Tensor of type float16 or float32, for the offset
* of "x" . \n
*@attention Constraints:
* The preceding layer of this operator must be BNTrainingUpdateGrad . \n
*@see BNTrainingUpdateGrad
*/
REG_OP(BNTrainingReduceGrad)
    .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(diff_scale, TensorType({DT_FLOAT}))
    .INPUT(diff_offset, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OP_END_FACTORY_REG(BNTrainingReduceGrad)

/**
*@brief Performs the backpropagation of BatchNorm . \n
*@par Inputs:
* Seven inputs, including:
*@li grads: A tensor of type float16 or float32, for
* the gradient.
*@li x: A tensor of type float16 or float32.
*@li diff_scale: A tensor of type float32,
* an output of BN3DTrainingUpdateGrad.
*@li diff_offset: A tensor of type float32,
* an output of BN3DTrainingUpdateGrad.
*@li scale: A tensor of type float32.
*@li batch_mean: A tensor of type float32,
* for the mean of "x".
*@li batch_variance: A tensor of type float32,
* for the variance of "x" . \n
*@par Attributes:
*epsilon: An optional float32. Defaults to "0.0001". A small float number
* added to the variance of "x" . \n
*@par Outputs:
*y: A Tensor of type float16 or float32, for the offset
* of "x" . \n
*@attention Constraints:
* The preceding layer of this operator must be BN3DTrainingUpdateGrad . \n
*@see BN3DTrainingUpdateGrad
*/
REG_OP(BN3DTrainingReduceGrad)
    .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(diff_scale, TensorType({DT_FLOAT}))
    .INPUT(diff_offset, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OP_END_FACTORY_REG(BN3DTrainingReduceGrad)

/**
*@brief Performs reduced batch normalization . \n
*@par Inputs:
* Seven inputs, including:
*@li x: A tensor of type float16 or float32.
*@li sum: A 1D Tensor of type float32 for the output of operator
* BNTrainingReduce.
*@li square_sum: A 1D Tensor of type float32 for the output of operator
* BNTrainingReduce.
*@li scale: A 1D Tensor of type float32, for the scaling factor.
*@li offset: A 1D Tensor of type float32, for the scaling offset.
*@li mean: A 1D Tensor of type float32, for the updated mean.
*@li variance: A 1D Tensor of type float32, for the updated variance . \n
*@par Attributes:
*@li epsilon: A required float32, specifying the small value added to variance
* to avoid dividing by zero.
*@li factor: A required float32, specifying the weight for updating the mean
* and variance . \n
*@par Outputs:
* Five outputs, including:
*@li y: A tensor of type float16 or float32, for normalized "x".
*@li mean: A tensor of type float32, for the updated mean.
*@li variance: A tensor of type float32, for the updated variance.
*@li batch_mean: A 1D Tensor of type float32, for the mean of "x".
*@li batch_variance: A 1D Tensor of type float32, for the variance of "x" . \n
*@attention Constraints:
*@li This operator is a BatchNorm fusion operator for updating the moving
* averages for training.
* This operator is used in conjunction with BNTrainingReduce.
*@li For Ascend 310, the result accuracy fails to reach 1/1000 due to the square
* root instruction.
*/
REG_OP(BNTrainingUpdate)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(factor, Float)
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(mean, TensorType({DT_FLOAT}))
    .OUTPUT(variance, TensorType({DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdate)
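
/* Sketch of the presumed per-channel math (standard BatchNorm fusion
 * semantics; the exact formulas are not spelled out in this header).
 * With N elements reduced per channel:
 *   batch_mean     = sum / N
 *   batch_variance = square_sum / N - batch_mean^2
 *   y              = scale * (x - batch_mean) / sqrt(batch_variance + epsilon) + offset
 * and the moving averages are presumably blended with weight "factor":
 *   mean     = factor * batch_mean     + (1 - factor) * mean
 *   variance = factor * batch_variance + (1 - factor) * variance
 */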

/**
*@brief Performs reduced batch normalization . \n
*@par Inputs:
* Seven inputs, including:
*@li x: A tensor of type float16 or float32.
*@li sum: A tensor of type float32 for the output of operator
* BN3DTrainingReduce.
*@li square_sum: A tensor of type float32 for the output of operator
* BN3DTrainingReduce.
*@li scale: A tensor of type float32, for the scaling factor.
*@li offset: A tensor of type float32, for the scaling offset.
*@li mean: A tensor of type float32, for the updated mean.
*@li variance: A tensor of type float32, for the updated variance . \n
*@par Attributes:
*@li epsilon: A required float32, specifying the small value added to variance
* to avoid dividing by zero.
*@li factor: A required float32, specifying the weight for updating the mean
* and variance . \n
*@par Outputs:
* Five outputs, including:
*@li y: A tensor of type float16 or float32, for normalized "x".
*@li mean: A tensor of type float32, for the updated mean.
*@li variance: A tensor of type float32, for the updated variance.
*@li batch_mean: A tensor of type float32, for the mean of "x".
*@li batch_variance: A tensor of type float32, for the variance of "x" . \n
*@attention Constraints:
*@li This operator is a BatchNorm fusion operator for updating the moving
* averages for training.
* This operator is used in conjunction with BN3DTrainingReduce.
*@li For Ascend 310, the result accuracy fails to reach 1/1000 due to the square
* root instruction.
*/
REG_OP(BN3DTrainingUpdate)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(factor, Float)
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(mean, TensorType({DT_FLOAT}))
    .OUTPUT(variance, TensorType({DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BN3DTrainingUpdate)

/**
*@brief Performs batch normalization for inference . \n
*@par Inputs:
* Five inputs, including:
*@li x: A tensor of type float16 or float32.
*@li scale: A tensor of type float32, for the scaling factor.
*@li offset: A tensor of type float32, for the scaling offset.
*@li mean: A tensor of type float32, for the mean.
*@li variance: A tensor of type float32, for the variance . \n
*@par Attributes:
*epsilon: A required float32, specifying the small value added to variance
* to avoid dividing by zero . \n
*@par Outputs:
*y: A tensor of type float16 or float32 for the normalized "x" . \n
*@attention Constraints:
*For Ascend 310, the result accuracy fails to reach 1/1000 due to the square root
* instruction.
*/
REG_OP(BNInfer)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OP_END_FACTORY_REG(BNInfer)
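
/* Sketch of the presumed inference formula (standard BatchNorm math, using
 * the precomputed "mean" and "variance" inputs rather than batch statistics):
 *   y = scale * (x - mean) / sqrt(variance + epsilon) + offset
 */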

/**
*@brief Performs reduced batch normalization, for scenes that do not contain
* the assign moving average . \n
*@par Inputs:
*Five inputs, including:
*@li x: A tensor of type float16 or float32.
*@li sum: A tensor of type float32 for the output of operator BNTrainingReduce.
*@li square_sum: A tensor of type float32 for the output of operator BNTrainingReduce.
*@li scale: A tensor of type float32, for the scaling factor.
*@li offset: A tensor of type float32, for the scaling offset . \n
*@par Attributes:
*epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero . \n
*@par Outputs:
*Three outputs, including:
*@li y: A tensor of type float16 or float32, for normalized "x".
*@li batch_mean: A tensor of type float32, for the mean of "x".
*@li batch_variance: A tensor of type float32, for the variance of "x" . \n
*@attention Constraints:
*@li This operator is used in conjunction with BNTrainingReduce.
*@li For Ascend 310, the result accuracy fails to reach 1/1000 due to the square root instruction.
*/
REG_OP(BNTrainingUpdateV2)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdateV2)

/**
*@brief Performs reduced batch normalization v3, for scenes that do not contain
* the assign moving average . \n
*@par Inputs:
* Five inputs, including:
*@li x: A tensor of type float16 or float32.
*@li sum: A tensor of type float32 for the output of operator BNTrainingReduce.
*@li square_sum: A tensor of type float32 for the output of operator BNTrainingReduce.
*@li scale: A tensor of type float32, for the scaling factor.
*@li offset: A tensor of type float32, for the scaling offset . \n
*@par Attributes:
*epsilon: A required float32, specifying the small value added to variance to avoid dividing by zero . \n
*@par Outputs:
*@li y: A tensor of type float16 or float32, for normalized "x".
*@li batch_mean: A tensor of type float32, for the mean of "x".
*@li batch_variance: A tensor of type float32, for the variance of "x".
*@li reserve_1: A tensor of type float32, for the mean of batch "x". Has the same type as batch_mean.
*@li reserve_2: A tensor of type float32, for the variance of batch "x". Has the same type as batch_mean . \n
*@attention Constraints:
*@li This operator is used in conjunction with BNTrainingReduce.
*@li For Ascend 310, the result accuracy fails to reach 1/1000 due to the square root instruction.
*/
REG_OP(BNTrainingUpdateV3)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(offset, TensorType({DT_FLOAT}))
    .REQUIRED_ATTR(epsilon, Float)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(reserve_1, TensorType({DT_FLOAT}))
    .OUTPUT(reserve_2, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdateV3)

/**
*@brief Performs the backpropagation of BatchNorm . \n
*@par Inputs:
* Four inputs, including:
*@li grads: A tensor of type float16 or float32,
* for the gradient.
*@li x: A tensor of type float16 or float32.
*@li batch_mean: A tensor of type float32,
* for the mean of "x".
*@li batch_variance: A tensor of type float32,
* for the variance of "x" . \n
*@par Attributes:
*epsilon: An optional float32. Defaults to "0.0001". A small float number
* added to the variance of "x" . \n
*@par Outputs:
*@li diff_scale: A Tensor of type float32,
* for the offset of "scale".
*@li diff_offset: A Tensor of type float32,
* for the offset of "offset" . \n
*/
REG_OP(BNTrainingUpdateGrad)
    .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OUTPUT(diff_scale, TensorType({DT_FLOAT}))
    .OUTPUT(diff_offset, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BNTrainingUpdateGrad)
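
/* Sketch of the presumed reductions (standard BatchNorm backward statistics;
 * not spelled out in this header), taken over the non-channel axes:
 *   diff_offset = sum(grads)
 *   diff_scale  = sum(grads * (x - batch_mean) / sqrt(batch_variance + epsilon))
 */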

/**
*@brief Performs the backpropagation of BatchNorm . \n
*@par Inputs:
* Four inputs, including:
*@li grads: A tensor of type float16 or float32,
* for the gradient.
*@li x: A tensor of type float16 or float32.
*@li batch_mean: A tensor of type float32,
* for the mean of "x".
*@li batch_variance: A tensor of type float32,
* for the variance of "x" . \n
*@par Attributes:
*epsilon: An optional float32. Defaults to "0.0001". A small float number
* added to the variance of "x" . \n
*@par Outputs:
*@li diff_scale: A Tensor of type float32,
* for the offset of "scale".
*@li diff_offset: A Tensor of type float32,
* for the offset of "offset" . \n
*/
REG_OP(BN3DTrainingUpdateGrad)
    .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(batch_mean, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OUTPUT(diff_scale, TensorType({DT_FLOAT}))
    .OUTPUT(diff_offset, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(BN3DTrainingUpdateGrad)

/**
*@brief Performs the backpropagation of BatchNorm for inference . \n
*@par Inputs:
* Three inputs, including:
*@li grads: A tensor of type float16 or float32, for the gradient.
*@li scale: A tensor of type float32.
*@li batch_variance: A tensor of type float32. It is an output of BatchNorm . \n
*@par Attributes:
*epsilon: An optional float32. Defaults to "0.0001". A small float number added to the variance of "x" . \n
*@par Outputs:
*x_backprop: A Tensor of type float16 or float32, for the offset of "x" . \n
*@attention Constraints:
* The preceding layer of this operator must be operator BatchNorm.
*/
REG_OP(BNInferGrad)
    .INPUT(grads, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(scale, TensorType({DT_FLOAT}))
    .INPUT(batch_variance, TensorType({DT_FLOAT}))
    .OUTPUT(x_backprop, TensorType({DT_FLOAT16,DT_FLOAT}))
    .ATTR(epsilon, Float, 0.0001)
    .OP_END_FACTORY_REG(BNInferGrad)
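
/* Sketch of the presumed gradient (inference-mode BatchNorm treats "mean"
 * and "variance" as constants, so only the affine part is differentiated):
 *   x_backprop = grads * scale / sqrt(batch_variance + epsilon)
 */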

/**
*@brief Computes the sum of elements across dimensions of a tensor . \n
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types:
* float32, float64, int32, uint8, int16, int8,
* complex64, int64, qint8, quint8, qint32, uint16,
* complex128, float16, uint32, uint64.
*@li axes: A 1D list or tuple of int32 or int64. Specifies the dimensions to reduce . \n
*@par Attributes:
*keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "false" . \n
*@par Outputs:
*y: The reduced tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Sum.
*/
REG_OP(ReduceSum)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceSum)
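
/* Illustrative example (added, with made-up values): for
 *   x = [[1, 2],
 *        [3, 4]]        // shape [2, 2]
 * and axes = [0], ReduceSum yields y = [4, 6] when keep_dims is false,
 * and y = [[4, 6]] (shape [1, 2]) when keep_dims is true.
 */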

/**
*@brief Computes the sum of elements across dimensions of a tensor . \n
*@par Inputs:
* One input:
*x: A Tensor. Up to 8D. Must be one of the following types: float16, float32. \n
*@par Attributes:
*@li axes: A required 1D list or tuple of int32 or int64. Specifies the dimensions to reduce.
*@li keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "false" . \n
*@par Outputs:
*y: The reduced tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Sum.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceSum instead.
*/
REG_OP(ReduceSumD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceSumD)

/**
*@brief Calculates the total mean based on the mean of each device . \n
*@par Inputs:
* Three inputs, including:
*@li x: A Tensor. Must be one of the following types: float16, float32.
*@li count: A Tensor. Must be one of the following types: float16, float32.
*@li count_sum: A Tensor. Must be one of the following types: float16, float32 . \n
*@par Attributes:
*@li axes: A required 1D list or tuple of int32 or int64. Specifies the dimensions to reduce.
*@li keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "false" . \n
*@par Outputs:
*y: The reduced tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Sum.
*/
REG_OP(ReduceMeanWithCount)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(count, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(count_sum, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMeanWithCount)

/**
*@brief Calculates the "logical AND" of elements of a tensor in a dimension . \n
*@par Inputs:
*One input:
*x: The boolean tensor to reduce . \n
*@par Attributes:
*@li keep_dims: A bool. If true, retains reduced dimensions with length 1.
*@li axes: The dimensions to reduce. If None, reduces all dimensions.
*Must be in the range [-rank(input_tensor), rank(input_tensor)) . \n
*@par Outputs:
*y: The reduced tensor . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceAll.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAll instead.
*/
REG_OP(ReduceAllD)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAllD)

/**
*@brief Calculates the "logical AND" of elements of a tensor in a dimension . \n
*@par Inputs:
*Two inputs, including:
*@li x: The boolean tensor to reduce.
*@li axes: A mutable Tensor. The dimensions to reduce. If None, reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)) . \n
*@par Attributes:
*keep_dims: A bool. If true, retains reduced dimensions with length 1 . \n
*@par Outputs:
*y: The reduced tensor . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceAll.
*/
REG_OP(ReduceAll)
    .INPUT(x, TensorType({DT_BOOL}))
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType({DT_BOOL}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAll)
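
/* Illustrative example (added, with made-up values): for
 *   x = [[true, false],
 *        [true, true]]
 * and axes = [1], ReduceAll yields y = [false, true]: each row reduces to
 * true only if every element in it is true.
 */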

/**
*@brief Reduces a tensor along the dimensions given in "axes" by taking the product . \n
*@par Inputs:
*Two inputs, including:
*@li x: A mutable Tensor. Must be the type of NumberType.
*@li axes: A mutable Tensor. The dimensions to reduce . \n
*@par Attributes:
*keep_dims: A bool. If true, retains reduced dimensions with length 1. Defaults to "False" . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceProd.
*/
REG_OP(ReduceProd)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceProd)
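
/* Illustrative example (added, with made-up values): for
 *   x = [[1, 2],
 *        [3, 4]]
 * and axes = [1], ReduceProd yields y = [2, 12] (the product of each row).
 */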

/**
*@brief Computes the product of elements across dimensions of a tensor . \n
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: float16, float, int8, uint8 . \n
*@par Attributes:
*@li axes: A required 1D list or tuple of int32 or int64. Specifies the dimensions to reduce. No default value.
*@li keep_dims: An optional bool. If "True", retains reduced dimensions with length 1. Defaults to "False" . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
* "axes" must be in the range [-rank(input_tensor), rank(input_tensor)) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator ReduceProd.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceProd instead.
*/
REG_OP(ReduceProdD)
    .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_UINT8, DT_INT8, DT_INT32, DT_FLOAT16}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceProdD)

/**
*@brief Reduces "x" along the dimensions according to "axes" . \n
*@par Inputs:
*Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32, int8, uint8.
* @li axes: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType.
* - If None (the default), reduces all dimensions.
* - Must be in the range [-rank(x), rank(x)) . \n
*@par Attributes:
*keep_dims: A bool or NoneType.
* - If true, retains reduced dimensions with length 1.
* - If false, the rank of the tensor is reduced by 1 for each entry in "axes".
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator ReduceMean.
*/
REG_OP(ReduceMean)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMean)
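
/* Illustrative example (added, with made-up values): for
 *   x = [[1, 2],
 *        [3, 4]]
 * and axes = [0], ReduceMean yields y = [2, 3] (the mean of each column).
 */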

/**
*@brief Reduces "x" along the dimensions according to "axes" . \n
*@par Inputs:
*One input:
* @li x: A Tensor. Must be one of the following types: float16, float32 . \n
*@par Attributes:
*@li axes: The dimensions to reduce. Must be one of the following types: int, list, tuple, NoneType.
* If None (the default), reduces all dimensions.
* Must be in the range [-rank(x), rank(x)).
*@li keep_dims: A bool or NoneType.
* - If true, retains reduced dimensions with length 1.
* - If false, the rank of the tensor is reduced by 1 for each entry in "axes".
*@li noop_with_empty_axes: An optional bool. Defaults to "False".
* - If true, an empty "axes" performs no reduction (TensorFlow behavior).
* - If false, an empty "axes" reduces all dimensions (ONNX behavior).
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility:
* Compatible with the TensorFlow operator ReduceMean.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMean instead.
*/
REG_OP(ReduceMeanD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .ATTR(noop_with_empty_axes, Bool, false)
    .OP_END_FACTORY_REG(ReduceMeanD)

/**
*@brief Returns the maximum of elements across dimensions of a Tensor . \n
*@par Inputs:
* Two inputs, including:
*@li x: A multi-dimensional Tensor of type float16, float32, or int16.
*@li axes: A Tensor of type int32 or int64, specifying the dimensions to reduce . \n
*@par Attributes:
*keep_dims: A bool, specifying whether to keep dimensions for the output Tensor. Defaults to "false" . \n
*@par Outputs:
*y: A multi-dimensional Tensor, holding the maximum values along the reduced axes. Has the same type as "x".
* If "keep_dims" is "false", the rank of the output is reduced by one for each reduced axis.
* Otherwise, the reduced dimensions are retained with length 1.
*@attention Constraints:
* The value range of "axes" is [-dims, dims - 1]. "dims" indicates the dimension length of "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Max.
*/
REG_OP(ReduceMax)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMax)
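
/* Illustrative example (added, with made-up values): for
 *   x = [[1, 5],
 *        [3, 2]]
 * and axes = [0], ReduceMax yields y = [3, 5] when keep_dims is false,
 * and y = [[3, 5]] (shape [1, 2]) when keep_dims is true.
 */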

/**
*@brief Returns the maximum of elements across dimensions of a Tensor . \n
*@par Inputs:
*x: A multi-dimensional Tensor of type float16, float32, or int16 . \n
*@par Attributes:
* Two attributes, including:
*@li axes: A required list of ints, specifying the dimensions to reduce.
*@li keep_dims: A bool, specifying whether to keep dimensions for the output Tensor. Defaults to "false" . \n
*@par Outputs:
*y: A multi-dimensional Tensor, holding the maximum values along the reduced axes. Has the same type as "x".
* If "keep_dims" is "false", the rank of the output is reduced by one for each reduced axis.
* Otherwise, the reduced dimensions are retained with length 1.
*@attention Constraints:
* The value range of "axes" is [-dims, dims - 1]. "dims" indicates the dimension length of "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Max.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMax instead.
*/
REG_OP(ReduceMaxD)
    .INPUT(x, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
                          DT_FLOAT16, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_UINT8, DT_INT8,
                           DT_FLOAT16, DT_INT32}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMaxD)

/**
*@brief Computes the minimum of elements across dimensions of a tensor . \n
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int8, uint8.
*@li axes: A Tensor of type int32 or int64. Specifies the dimensions to reduce. Defaults to "None".
*@par Attributes:
*keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False".
*@par Outputs:
*y: A Tensor. Must be one of the following types: float16, float32, int8, uint8 . \n
*@attention Constraints:
* If "axes = None", all dimensions will be reduced. "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator reduce_min.
*/
REG_OP(ReduceMin)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMin)

/**
*@brief Computes the minimum of elements across dimensions of a tensor . \n
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int8, uint8 . \n
*@par Attributes:
*@li axes: A required list of int32 or int64. Specifies the dimensions to reduce.
*@li keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False".
*@par Outputs:
*y: A Tensor. Must be one of the following types: float16, float32, int8, uint8 . \n
*@attention Constraints:
* "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator reduce_min.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceMin instead.
*/
REG_OP(ReduceMinD)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT8,DT_UINT8,DT_INT32}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceMinD)

/**
*@brief Computes the "logical or" of elements across dimensions of a tensor.
* Reduces "x" along the dimensions given in "axes".
* Unless "keep_dims" is true, the rank of the tensor is reduced by 1 for each
* entry in "axes". If "keep_dims" is true, the reduced dimensions
* are retained with length 1.
*
* If "axes" is None, all dimensions are reduced, and a
* tensor with a single element is returned.
*
*@attention Constraints:
* Only the bool type is supported.
*
*@par Inputs:
*@li x: The boolean tensor to reduce.
*@li axes: The dimensions to reduce. If "None" (default), reduces all
* dimensions. Must be in the range "[-rank(x), rank(x))".
*
*@par Attributes:
* keep_dims: If true, retains reduced dimensions with length 1.
*
*@par Outputs:
* y: The reduced tensor
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator reduce_any.
*
*/
REG_OP(ReduceAny)
    .INPUT(x, TensorType({DT_BOOL}))
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType({DT_BOOL}))
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAny)
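
/* Illustrative example (added, with made-up values): for
 *   x = [[false, false],
 *        [false, true]]
 * and axes = [0], ReduceAny yields y = [false, true]: each column reduces
 * to true if any element in it is true.
 */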

/**
*@brief Computes the "logical or" of elements across dimensions of a tensor.
* Reduces "x" along the dimensions given in "axes".
* Unless "keep_dims" is true, the rank of the tensor is reduced by 1 for each
* entry in "axes". If "keep_dims" is true, the reduced dimensions
* are retained with length 1.
*
* If "axes" is None, all dimensions are reduced, and a
* tensor with a single element is returned.
*
*@attention Constraints:
* Only the bool type is supported.
*
*@par Inputs:
* x: The boolean tensor to reduce.
*
*@par Attributes:
*@li axes: The dimensions to reduce. Must be in the range "[-rank(x), rank(x))".
*@li keep_dims: If true, retains reduced dimensions with length 1.
*
*@par Outputs:
* y: The reduced tensor
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator reduce_any.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use ReduceAny instead.
*/
REG_OP(ReduceAnyD)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_BOOL}))
    .REQUIRED_ATTR(axes, ListInt)
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceAnyD)

/**
*@brief Compute reduction on dimensions specified by "axis".
*Four reduction operations are provided:
*SUM Computes the sum of elements across specified dimensions of a tensor.
*ASUM Computes the sum of absolute values of elements across specified dimensions of a tensor.
*SUMSQ Computes the sum of squares of elements across specified dimensions of a tensor.
*MEAN Computes the mean of elements across specified dimensions of a tensor . \n
*@par Inputs:
*x: A Tensor of type float16 or float32 . \n
*@par Attributes:
*@li operation: An optional int32 from 1(SUM), 2(ASUM), 3(SUMSQ), and 4(MEAN),
*specifying the reduction algorithm. Defaults to "1".
*@li axis: An optional int32, specifying the first axis to reduce. Defaults to "0".
*The value range is [-N, N-1], where N is the input tensor rank.
*@li coeff: An optional float32, specifying the scale coefficient. Defaults to "1.0" . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@attention Constraints: The Reduction operator supports type float16 only on the device chip.
*@par Third-party framework compatibility
* Compatible with the Caffe operator Reduction.
*/
REG_OP(Reduction)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(operation, Int, 1)
    .ATTR(axis, Int, 0)
    .ATTR(coeff, Float, 1.0)
    .OP_END_FACTORY_REG(Reduction);
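
/* Illustrative example (added, assuming the usual Caffe semantics of
 * reducing from "axis" through the last dimension): for x of shape [2, 3]
 *   x = [[1, -2, 3],
 *        [4,  5, 6]]
 * with operation = 2 (ASUM), axis = 1 and coeff = 2.0, the result would be
 *   y = [2 * (1 + 2 + 3), 2 * (4 + 5 + 6)] = [12, 30].
 */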

/**
*@brief Computes the euclidean norm of elements across dimensions of a tensor . \n
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: float16, float32, int32.
*@li axes: A Tensor of type int32 or int64. Specifies the dimensions to reduce. Defaults to "None" . \n
*@par Attributes:
*keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False" . \n
*@par Outputs:
*y: A Tensor. Must be one of the following types: float16, float32, int32 . \n
*@attention Constraints:
* If "axes = None", all dimensions will be reduced. "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator EuclideanNorm.
*/
REG_OP(EuclideanNorm)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(EuclideanNorm)
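
/* Illustrative example (added, with made-up values): for x = [3, 4] and
 * axes = [0], EuclideanNorm yields y = sqrt(3*3 + 4*4) = 5.
 */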

/**
*@brief Computes the euclidean norm of elements across dimensions of a tensor . \n
*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float32, int32 . \n
*@par Attributes:
*@li axes: An optional list of int32 or int64. Specifies the dimensions to reduce. Defaults to "{}".
*@li keep_dims: An optional bool. If "True", reduced dimensions will be retained. Defaults to "False" . \n
*@par Outputs:
*y: A Tensor. Must be one of the following types: float16, float32, int32 . \n
*@attention Constraints:
* If "axes" is empty, all dimensions will be reduced. "axes" must be in the range [-rank(input_shape), rank(input_shape)) . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator EuclideanNorm.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use EuclideanNorm instead.
*/
REG_OP(EuclideanNormD)
    .INPUT(x, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_INT32, DT_FLOAT16}))
    .ATTR(axes, ListInt, {})
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(EuclideanNormD)

/**
*@brief Performs instance normalization for inference . \n
*@par Inputs:
* Five inputs, including:
*@li x: A Tensor of type float16 or float32.
*@li gamma: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling gamma.
*@li beta: A [N, C1, 1, 1, C0] Tensor of type float32, for the scaling beta.
*@li mean: A [N, C1, 1, 1, C0] Tensor of type float32, for the mean.
*@li variance: A [N, C1, 1, 1, C0] Tensor of type float32, for the variance . \n
*@par Attributes:
*epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero.
* Defaults to "0.00001" . \n
*@par Outputs:
*@li y: A Tensor of type float16 or float32 for the normalized "x".
*@li batch_mean: A Tensor of type float32 for the result mean.
*@li batch_variance: A Tensor of type float32 for the result variance . \n
*@attention Constraints:
*For Ascend 310, the result accuracy fails to reach 0.001 due to the square root instruction.
*/
REG_OP(INInferV2)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OPTIONAL_INPUT(gamma, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(beta, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT}))
    .ATTR(epsilon, Float, 0.00001)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(INInferV2)

/**
*@brief Performs reduce instance normalization. \n
*@par Inputs:
*x: A Tensor of type float16 or float32. \n
*@par Outputs:
*@li sum: A Tensor of type float32 for SUM reduced "x".
*@li square_sum: A Tensor of type float32 for SUMSQ reduced "x" . \n
*@attention Constraints:
* This operator is an InstanceNorm fusion operator for updating the moving averages for training.
* This operator is used in conjunction with INTrainingUpdateV2.
*/
REG_OP(INTrainingReduceV2)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(sum, TensorType({DT_FLOAT}))
    .OUTPUT(square_sum, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(INTrainingReduceV2)

/**
*@brief Performs update instance normalization. \n
*@par Inputs:
* Seven inputs, including:
*@li x: A Tensor of type float16 or float32.
*@li sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
*@li square_sum: A Tensor of type float32 for the output of operator INTrainingReduceV2.
*@li gamma: A Tensor of type float32, for the scaling gamma.
*@li beta: A Tensor of type float32, for the scaling beta.
*@li mean: A Tensor of type float32, for the updated mean.
*@li variance: A Tensor of type float32, for the updated variance. \n
*@par Attributes:
*@li momentum: An optional float32, specifying the momentum used to update the mean and variance. Defaults to "0.1".
*@li epsilon: An optional float32, specifying the small value added to variance to avoid dividing by zero. Defaults to "0.00001". \n
*@par Outputs:
* Three outputs, including:
*@li y: A Tensor of type float16 or float32, for normalized "x".
*@li batch_mean: A Tensor of type float32, for the updated mean.
*@li batch_variance: A Tensor of type float32, for the updated variance. \n
*@attention Constraints:
* This operator is an InstanceNorm fusion operator for updating the moving averages for training.
* This operator is used in conjunction with INTrainingReduceV2.
*/
REG_OP(INTrainingUpdateV2)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(gamma, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(beta, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT}))
    .ATTR(momentum, Float, 0.1)
    .ATTR(epsilon, Float, 0.00001)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(INTrainingUpdateV2)
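
/* Sketch of the presumed running-statistics update (standard momentum
 * blending; the exact convention is not spelled out in this header).
 * With N elements reduced per instance:
 *   batch_mean     = sum / N
 *   batch_variance = square_sum / N - batch_mean^2
 *   mean           = (1 - momentum) * mean     + momentum * batch_mean
 *   variance       = (1 - momentum) * variance + momentum * batch_variance
 */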

/**
*@brief Performs the backpropagation of InstanceNorm. \n
*@par Inputs:
* Seven inputs, including:
*@li dy: A Tensor of type float16 or float32.
*@li x: A Tensor of type float16 or float32.
*@li variance: A Tensor of type float32, for the variance of "x".
*@li mean: A Tensor of type float32, for the mean of "x".
*@li res_gamma: A Tensor of type float32.
*@li res_beta: A Tensor of type float32.
*@li gamma: A Tensor of type float32. \n
*@par Outputs:
*pd_x: A Tensor of type float16 or float32, for the offset of "x". \n
*@attention Constraints:
* The preceding layer of this operator must be INTrainingUpdateGrad. \n
*/
REG_OP(INTrainingReduceGrad)
    .INPUT(dy, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .INPUT(res_gamma, TensorType({DT_FLOAT}))
    .INPUT(res_beta, TensorType({DT_FLOAT}))
    .INPUT(gamma, TensorType({DT_FLOAT}))
    .OUTPUT(pd_x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OP_END_FACTORY_REG(INTrainingReduceGrad)

/**
*@brief Performs the backpropagation of InstanceNorm. \n
*@par Inputs:
* Four inputs, including:
*@li dy: A Tensor of type float16 or float32, for the gradient.
*@li x: A Tensor of type float16 or float32.
*@li variance: A Tensor of type float32, for the variance of "x".
*@li mean: A Tensor of type float32, for the mean of "x". \n
*@par Outputs:
*@li res_gamma: A Tensor of type float32.
*@li res_beta: A Tensor of type float32. \n
*/
REG_OP(INTrainingUpdateGrad)
    .INPUT(dy, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(variance, TensorType({DT_FLOAT}))
    .INPUT(mean, TensorType({DT_FLOAT}))
    .OUTPUT(res_gamma, TensorType({DT_FLOAT}))
    .OUTPUT(res_beta, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(INTrainingUpdateGrad)
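
/* Sketch of the presumed per-instance reductions (standard InstanceNorm
 * backward statistics, reduced over the spatial axes; not spelled out here):
 *   res_beta  = sum(dy)
 *   res_gamma = sum(dy * (x - mean) / sqrt(variance + epsilon))
 */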

/**
*@brief Performs the backpropagation of InstanceNorm. \n
*@par Inputs:
* Two inputs, including:
*@li res_gamma: A Tensor of type float32.
*@li res_beta: A Tensor of type float32. \n
*@par Outputs:
*@li pd_gamma: A Tensor of type float32.
*@li pd_beta: A Tensor of type float32. \n
*/
REG_OP(INTrainingUpdateGradGammaBeta)
    .INPUT(res_gamma, TensorType({DT_FLOAT}))
    .INPUT(res_beta, TensorType({DT_FLOAT}))
    .OUTPUT(pd_gamma, TensorType({DT_FLOAT}))
    .OUTPUT(pd_beta, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(INTrainingUpdateGradGammaBeta)

/**
*@brief Performs reduced group normalization . \n
*@par Inputs:
*x: A Tensor of type float16 or float32, with format NCHW or NHWC . \n
*@par Outputs:
*@li sum: A Tensor of type float32 for SUM reduced "x".
*@li square_sum: A Tensor of type float32 for SUMSQ reduced "x".
*@par Attributes:
*num_groups: An optional int, specifying the number of groups. Defaults to "2". Must be the same as in GNTrainingUpdate . \n
*@attention Constraints:
* This operator is a GroupNorm fusion operator for updating the moving averages for training.
* This operator is used in conjunction with GNTrainingUpdate.
*/
REG_OP(GNTrainingReduce)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(sum, TensorType({DT_FLOAT}))
    .OUTPUT(square_sum, TensorType({DT_FLOAT}))
    .ATTR(num_groups, Int, 2)
    .OP_END_FACTORY_REG(GNTrainingReduce)

/**
*@brief Performs update group normalization . \n
*@par Inputs:
* Seven inputs, including: (NCHW or NHWC supported)
*@li x: A Tensor of type float16 or float32.
*@li sum: A tensor of type float32,
* shape is [N, G, 1, 1, 1] for NCHW, [N, 1, 1, G, 1] for NHWC,
* for the output of operator GNTrainingReduce.
*@li square_sum: A tensor of type float32,
* shape is [N, G, 1, 1, 1] for NCHW, [N, 1, 1, G, 1] for NHWC,
* for the output of operator GNTrainingReduce.
*@li scale: A tensor of type float32,
* shape is [1, G, 1, 1, 1] for NCHW, [1, 1, 1, G, 1] for NHWC,
* for the scaling gamma.
*@li offset: A tensor of type float32,
* shape is [1, G, 1, 1, 1] for NCHW, [1, 1, 1, G, 1] for NHWC,
* for the scaling beta.
*@li mean: A tensor of type float32,
* shape is [N, G, 1, 1, 1] for NCHW, [N, 1, 1, G, 1] for NHWC,
* for the updated mean.
*@li variance: A tensor of type float32,
* shape is [N, G, 1, 1, 1] for NCHW, [N, 1, 1, G, 1] for NHWC,
* for the updated variance.
*@par Attributes:
*@li epsilon: A float32, specifying the small value added to the variance to avoid dividing by zero.
*@li num_groups: An optional int, specifying the number of groups. Defaults to 2.
* Must be the same as in GNTrainingReduce.
*@par Outputs:
* Three outputs, including:
*@li y: A Tensor of type float16 or float32, for normalized "x".
*@li batch_mean: A Tensor of type float32, for the updated mean.
*@li batch_variance: A Tensor of type float32, for the updated variance . \n
*@attention Constraints:
*@li This operator is a GroupNorm fusion operator for updating the moving averages for training.
* This operator is used in conjunction with GNTrainingReduce.
*@li For Ascend 310, the result accuracy fails to reach 1/1000 due to the square root instruction.
*/
REG_OP(GNTrainingUpdate)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .INPUT(sum, TensorType({DT_FLOAT}))
    .INPUT(square_sum, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(scale, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(offset, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(mean, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(variance, TensorType({DT_FLOAT}))
    .ATTR(num_groups, Int, 2)
    .ATTR(epsilon, Float, 0.0001)
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(batch_mean, TensorType({DT_FLOAT}))
    .OUTPUT(batch_variance, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(GNTrainingUpdate)
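
/*
 * Illustrative only: the group-norm update math implied by the inputs above,
 * assuming "sum"/"square_sum" come from GNTrainingReduce over a group of m
 * elements. A scalar sketch; the kernel applies this elementwise per group.
 *   #include <cmath>
 *   inline float GnNormalize(float x, float sum, float square_sum, float m,
 *                            float scale, float offset, float epsilon) {
 *     float mean = sum / m;
 *     float variance = square_sum / m - mean * mean;  // E[x^2] - (E[x])^2
 *     return scale * (x - mean) / std::sqrt(variance + epsilon) + offset;
 *   }
 */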

/**
*@brief Joins a string Tensor across the given dimensions. \n
*@par Inputs:
* Two inputs, including:
*@li input: A Tensor of type string. The text to be processed.
*@li reduction_indices: A Tensor of type int32. The dimensions to reduce over.
*@par Attributes:
*@li keep_dims: An optional bool. Defaults to "true". If "true", retains reduced dimensions with length 1.
*@li separator: An optional string, inserted between the joined elements. Defaults to "".
*@par Outputs:
*output: A Tensor of type string.
*/
REG_OP(ReduceJoin)
    .INPUT(input, TensorType({DT_STRING}))
    .INPUT(reduction_indices, TensorType({DT_INT32}))
    .OUTPUT(output, TensorType({DT_STRING}))
    .ATTR(keep_dims, Bool, true)
    .ATTR(separator, String, "")
    .OP_END_FACTORY_REG(ReduceJoin)
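
/*
 * Illustrative only: assuming semantics matching tf.strings.reduce_join,
 * joining [["a","b"], ["c","d"]] along axis 1 with separator "-" yields
 * ["a-b", "c-d"] (shape [2, 1] when keep_dims is true). The inner join of
 * one row looks like:
 *   #include <string>
 *   #include <vector>
 *   inline std::string JoinRow(const std::vector<std::string>& row,
 *                              const std::string& sep) {
 *     std::string out;
 *     for (size_t i = 0; i < row.size(); ++i) {
 *       if (i > 0) out += sep;
 *       out += row[i];
 *     }
 *     return out;
 *   }
 */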

/**
* @brief Calculates the standard deviation and average value of Tensors.
* @par Inputs:
* x: A Tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* Three Attributes, including:
* @li dim: An optional list of int. Defaults to {}, which reduces all dimensions. \n
* @li unbiased: An optional bool. Defaults to "True".
* If "True", use Bessel's correction.
* If "False", do not use Bessel's correction. \n
* @li keepdim: An optional bool. Defaults to "False".
* If "True", keep the original tensor dimension.
* If "False", do not keep the original tensor dimension. \n
* @par Outputs:
* Two Outputs, including:
* @li y1: A Tensor. Has the same type as "x".
* @li y2: A Tensor. Has the same type as "x". \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator ReduceStd.
*/
REG_OP(ReduceStd)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y1, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y2, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(dim, ListInt, {})
    .ATTR(unbiased, Bool, true)
    .ATTR(keepdim, Bool, false)
    .OP_END_FACTORY_REG(ReduceStd)
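
/*
 * Illustrative only: the std/mean math over a flat span, with "unbiased"
 * selecting Bessel's correction (divide by n - 1 instead of n). Pairing
 * y1 = std and y2 = mean mirrors torch.std_mean and is an assumption here.
 *   #include <cmath>
 *   #include <utility>
 *   #include <vector>
 *   inline std::pair<float, float> StdMean(const std::vector<float>& x, bool unbiased) {
 *     const float n = static_cast<float>(x.size());
 *     float mean = 0.0f;
 *     for (float v : x) mean += v;
 *     mean /= n;
 *     float ss = 0.0f;
 *     for (float v : x) ss += (v - mean) * (v - mean);
 *     const float var = ss / (unbiased ? n - 1.0f : n);
 *     return {std::sqrt(var), mean};
 *   }
 */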

/**
* @brief Calculates the standard deviation of Tensors.
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32. \n
* @li mean: A Tensor. The mean of "x". Must be one of the following types: float16, float32. \n
* @par Attributes:
* Five Attributes, including:
* @li dim: An optional list of int. Defaults to {}. \n
* @li unbiased: An optional bool. Defaults to "True".
* If "True", use Bessel's correction.
* If "False", do not use Bessel's correction. \n
* @li keepdim: An optional bool. Defaults to "False".
* If "True", keep the original tensor dimension.
* If "False", do not keep the original tensor dimension. \n
* @li invert: An optional bool. Defaults to "False".
* If "True", the output is the reciprocal of the variance.
* If "False", the output is the variance.
* @li epsilon: An optional float. Defaults to 0.001.
* Prevents division by 0.
* @par Outputs:
* @li y: A Tensor. The variance of "x", or the reciprocal of the variance of "x". Has the same type as "x".
* @par Third-party framework compatibility
* Compatible with the Pytorch operator ReduceStdWithMean.
*/
REG_OP(ReduceStdWithMean)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(dim, ListInt, {})
    .ATTR(unbiased, Bool, true)
    .ATTR(keepdim, Bool, false)
    .ATTR(invert, Bool, false)
    .ATTR(epsilon, Float, 0.001)
    .OP_END_FACTORY_REG(ReduceStdWithMean)
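
/*
 * Illustrative only: one reading of the "invert"/"epsilon" attributes --
 * compute the (optionally unbiased) variance against the supplied mean,
 * then optionally return its reciprocal, with epsilon guarding the division.
 *   #include <vector>
 *   inline float VarWithMean(const std::vector<float>& x, float mean,
 *                            bool unbiased, bool invert, float epsilon) {
 *     const float n = static_cast<float>(x.size());
 *     float ss = 0.0f;
 *     for (float v : x) ss += (v - mean) * (v - mean);
 *     const float var = ss / (unbiased ? n - 1.0f : n);
 *     return invert ? 1.0f / (var + epsilon) : var;
 *   }
 */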

/**
*@brief Computes the mean and variance of "x" across the dimensions given in "axes" . \n
*@par Inputs:
*x: A tensor of type float16 or float32 . \n
*@par Attributes:
*@li axes: An optional list of int, the dimensions to reduce. Defaults to {}.
*@li keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "true" . \n
*@par Outputs:
*@li mean: A Tensor of the same type as "x", for the mean of reduced "x".
*@li variance: A Tensor of the same type as "x", for the variance of reduced "x" . \n
*@par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ReduceMeanVariance)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(mean, TensorType({DT_FLOAT16,DT_FLOAT}))
    .OUTPUT(variance, TensorType({DT_FLOAT16,DT_FLOAT}))
    .ATTR(axes, ListInt, {})
    .ATTR(keep_dims, Bool, true)
    .OP_END_FACTORY_REG(ReduceMeanVariance)
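
/*
 * Worked example (illustrative only): reducing x = {1.0, 2.0, 3.0} over all
 * axes gives mean = 2.0 and variance = ((1-2)^2 + (2-2)^2 + (3-2)^2) / 3 = 2/3;
 * whether the divisor is n or n - 1 is not stated in this header, so the
 * population form (divide by n) is assumed here.
 */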

/**
* @brief Calculates the standard deviation or the variance of Tensors with the average value.
* @par Inputs:
* Two inputs, including:
* @li x: A Tensor. Must be one of the following types: float16, float32. \n
* @li mean: A Tensor. The mean of "x". Has the same shape and type as "x". \n
* @par Attributes:
* Four Attributes, including:
* @li dim: A required list of int, the dimensions to reduce. \n
* @li if_std: An optional bool. Defaults to "False".
* If "True", calculate the standard deviation.
* If "False", calculate the variance.
* @li unbiased: An optional bool. Defaults to "True".
* If "True", use Bessel's correction.
* If "False", do not use Bessel's correction. \n
* @li keepdim: An optional bool. Defaults to "False".
* If "True", keep the original tensor dimension.
* If "False", do not keep the original tensor dimension. \n
* @par Outputs:
* @li output_var: A Tensor. The standard deviation or the variance of "x". Has the same type as "x".
* @par Third-party framework compatibility
* Compatible with the Pytorch operator Var_mean.
*/
REG_OP(ReduceStdV2Update)
    .INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16}))
    .INPUT(mean, TensorType({DT_FLOAT,DT_FLOAT16}))
    .OUTPUT(output_var, TensorType({DT_FLOAT,DT_FLOAT16}))
    .REQUIRED_ATTR(dim, ListInt)
    .ATTR(if_std, Bool, false)
    .ATTR(unbiased, Bool, true)
    .ATTR(keepdim, Bool, false)
    .OP_END_FACTORY_REG(ReduceStdV2Update)
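
/*
 * Illustrative only: "if_std" simply applies a square root to the variance
 * computed as in the VarWithMean sketch above (without the invert/epsilon
 * step), i.e. output_var = if_std ? sqrt(var) : var.
 */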

/**
*@brief Computes the log of the sum of exponentials of elements across dimensions of a tensor.
* Reduces "x" along the dimensions given in "axes".
* Unless "keep_dims" is true, the rank of the tensor is reduced by 1 for each
* entry in "axes". If "keep_dims" is true, the reduced dimensions
* are retained with length 1.
*
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types:
* float32, float16, int32, int64, uint32, uint64, double
*@li axes: A 1D list or tuple of int32 or int64. Specifies the dimensions to reduce . \n
*
*@par Attributes:
*keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "false" . \n
*
*@par Outputs:
*y: The reduced tensor. Has the same type and format as input "x" . \n
*
*@par Third-party framework compatibility
* Compatible with the Onnx operator ReduceLogSumExp.
*/
REG_OP(ReduceLogSumExp)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceLogSumExp)
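
/*
 * Illustrative only: the reduction is log(sum(exp(x))) along "axes". A
 * numerically robust host-side sketch subtracts the maximum first (the
 * log-sum-exp trick), which leaves the result mathematically unchanged.
 *   #include <algorithm>
 *   #include <cmath>
 *   #include <vector>
 *   inline float LogSumExp(const std::vector<float>& x) {  // x must be non-empty
 *     const float m = *std::max_element(x.begin(), x.end());
 *     double s = 0.0;
 *     for (float v : x) s += std::exp(static_cast<double>(v - m));
 *     return m + static_cast<float>(std::log(s));
 *   }
 */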

/**
*@brief Computes the log of the sum of elements across dimensions of a tensor.
* Reduces "x" along the dimensions given in "axes".
* Unless "keep_dims" is true, the rank of the tensor is reduced by 1 for each
* entry in "axes". If "keep_dims" is true, the reduced dimensions
* are retained with length 1.
*
*@par Inputs:
* Two inputs, including:
*@li x: A Tensor. Must be one of the following types:
* float32, float16, int32, int64, uint32, uint64, double
*@li axes: A 1D list or tuple of int32 or int64. Specifies the dimensions to reduce . \n
*
*@par Attributes:
*keep_dims: An optional bool. If "true", retains reduced dimensions with length 1. Defaults to "false" . \n
*
*@par Outputs:
*y: The reduced tensor. Has the same type and format as input "x" . \n
*
*@par Third-party framework compatibility
* Compatible with the Onnx operator ReduceLogSum.
*/
REG_OP(ReduceLogSum)
    .INPUT(x, TensorType::NumberType())
    .INPUT(axes, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::NumberType())
    .ATTR(keep_dims, Bool, false)
    .OP_END_FACTORY_REG(ReduceLogSum)
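
/*
 * Illustrative only: here the reduction is log(sum(x)) along "axes" (no
 * exponentiation), so the elements along the reduced dimensions are assumed
 * to sum to a positive value.
 *   #include <cmath>
 *   #include <vector>
 *   inline float LogSum(const std::vector<float>& x) {
 *     double s = 0.0;
 *     for (float v : x) s += v;
 *     return static_cast<float>(std::log(s));
 *   }
 */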

}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_REDUCE_OPS_H_

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph-optimization passes on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE is specifically optimized for the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core.