nn_pooling_ops.h
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file nn_pooling_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H_

#include "graph/operator_reg.h"
#include "graph/operator.h"

namespace ge {

/**
*@brief Performs pooling on the input.
*@par Inputs:
* x: An NCHW tensor of type float16, float32, int8.
*@par Attributes:
*@li mode: An optional int32, specifying the pooling algorithm, either "0" (max pooling) or "1" (avg pooling). Defaults to "0".
*@li global_pooling: An optional bool. Defaults to "false".
*@li window: Optional, including:
*window[0]: An optional int32, specifying the window size along the H dimension. The value range is [1, 32768]. Defaults to "1".
*window[1]: An optional int32, specifying the window size along the W dimension. The value range is [1, 32768]. Defaults to "1".
*@li stride: Optional, including:
*stride[0]: An optional int32, specifying the stride along the H dimension. The value range is [1, 63]. Defaults to "1".
*stride[1]: An optional int32, specifying the stride along the W dimension. The value range is [1, 63]. Defaults to "1".
*@li pad: Optional, including:
*pad[0]: An optional int32, specifying the top padding. Defaults to "0".
*pad[1]: An optional int32, specifying the bottom padding. Defaults to "0".
*pad[2]: An optional int32, specifying the left padding. Defaults to "0".
*pad[3]: An optional int32, specifying the right padding. Defaults to "0".
*@li dilation: Optional, including:
*dilation[0]: An optional int32, specifying the top dilation. Defaults to "1".
*dilation[1]: An optional int32, specifying the bottom dilation. Defaults to "1".
*dilation[2]: An optional int32, specifying the left dilation. Defaults to "1".
*dilation[3]: An optional int32, specifying the right dilation. Defaults to "1".
*@li ceil_mode: An optional int32, either "0" (ceil mode) or "1" (floor mode). Defaults to "0".
*@li data_format: An optional string, specifying the data format of the input and output data. Defaults to "NCHW".
*@par Outputs:
*y: An NCHW tensor of type float16, float32, int32.
*@attention Constraints:
*@li window[0] * window[1] < 256;
*@li 1 <= input_h <= 4096, 1 <= input_w <= 4096
*@li If input tensor N is a prime number, it should be less than 65535.
*@par Third-party framework compatibility
*@li Compatible with the Caffe operator Pooling.
*@li Compatible with the TensorFlow operator Pooling.
*/
REG_OP(Pooling)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_INT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_INT32}))
    .ATTR(mode, Int, 0)                  // 0: max pooling, 1: avg pooling
    .ATTR(global_pooling, Bool, false)
    .ATTR(window, ListInt, {1, 1})       // kernel size
    .ATTR(stride, ListInt, {1, 1})       // stride size
    .ATTR(pad, ListInt, {0, 0, 0, 0})    // pad size
    .ATTR(dilation, ListInt, {1, 1, 1, 1})
    .ATTR(ceil_mode, Int, 0)
    .ATTR(data_format, String, "NCHW")
    .OP_END_FACTORY_REG(Pooling)
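
/*
 * Usage sketch (illustrative, not part of the original header): REG_OP
 * generates a C++ wrapper class in namespace ge::op with set_input_<name>
 * and set_attr_<name> methods. A Pooling node could then be built like this;
 * the node name and attribute values are assumptions for illustration:
 *
 *     ge::op::Pooling pool("pool_1");
 *     pool.set_input_x(prev);            // prev: an upstream ge::Operator
 *     pool.set_attr_mode(0);             // 0 = max pooling
 *     pool.set_attr_window({3, 3});      // 3x3 kernel
 *     pool.set_attr_stride({2, 2});
 *     pool.set_attr_pad({1, 1, 1, 1});   // top, bottom, left, right
 */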

/**
*@brief Performs average pooling on the input . \n
*@par Inputs:
*x: A tensor of type float16, float32, double . \n
*@par Attributes:
*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window,
* where N = C = 1, and H and W are positive integers within the range [1, 255].
*@li strides: A required list of 4 ints, specifying the stride of the sliding window.
* The strides of the N and C dimensions are 1.
* The strides of the H and W dimensions are positive integers within the range [1, 63].
*@li padding: A required string, specifying the padding algorithm, either "VALID" or "SAME".
* "SAME" pads the input so that the output has the same spatial dimensions as the input when the stride is 1;
* "VALID" means no padding.
*@li data_format: An optional string, specifying the data format of "ksize" and "strides",
* either "NCHW" or "NHWC" (default) . \n
*@par Outputs:
*y: The average pooled output tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
*@li This operator applies only to a TensorFlow network.
*@li Only single input and single output are supported.
*@li Global pooling is supported.
*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 255], and ksize_H * ksize_W < 256.
*@li Due to instruction restrictions,
* the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPool.
*/
REG_OP(AvgPool)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(AvgPool)
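
/*
 * Note on "padding" (a sketch of TensorFlow's documented output-size
 * semantics, which this operator claims compatibility with; the formulas are
 * not stated in this header):
 *
 *     VALID:  out = ceil((in - ksize + 1) / stride)
 *     SAME:   out = ceil(in / stride)
 *
 * applied independently to the H and W dimensions.
 */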

/**
*@brief Performs average pooling on the input.
*@par Inputs:
*x: A tensor of type float16, float32, double.
*@par Attributes:
*@li ksize: A required list of 4 ints, specifying the size (N, C, H, and W) of the sliding window,
* where N = C = 1, and H and W are positive integers within the range [1, 255].
*@li strides: A required list of 4 ints, specifying the stride of the sliding window.
* The strides of the N and C dimensions are 1.
* The strides of the H and W dimensions are positive integers within the range [1, 63].
*@li padding_mode: A required string, specifying the padding algorithm,
* one of "VALID", "SAME", or "CALCULATED".
* "SAME" pads the input so that the output has the same spatial dimensions as the input when the stride is 1;
* "VALID" means no padding.
*@li pads: Pad values used when padding_mode is "CALCULATED".
*@li data_format: An optional string, specifying the data format of "ksize" and "strides",
* either "NCHW" (default) or "NHWC".
*@li global_pooling: Whether to use global pooling. If true, pads changes to {0,0,0,0} and ksize changes to [input_h, input_w].
*@li ceil_mode: Whether to use ceil or floor to calculate the output size when padding_mode is "CALCULATED".
*@li exclusive: Whether to ignore the padding area when calculating the average.
*@par Outputs:
*y: The average pooled output tensor. Has the same type and format as input "x".
*@attention Constraints:
*@li Only single input and single output are supported.
*@li Global pooling is supported.
*@li "ksize_H" and "ksize_W" are positive integers within the range [1, 255], and ksize_H * ksize_W < 256.
*@li Due to instruction restrictions,
* the values of "strides_h" and "strides_w" are positive integers within the range [1, 63].
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPoolV2.
*/
REG_OP(AvgPoolV2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NCHW")
    .ATTR(global_pooling, Bool, false)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(exclusive, Bool, true)
    .OP_END_FACTORY_REG(AvgPoolV2)
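
/*
 * Sketch of the "CALCULATED" output size under explicit pads (an assumption
 * based on the common Caffe/PyTorch convention; not stated in this header):
 *
 *     floor mode: out = floor((in + pad_before + pad_after - ksize) / stride) + 1
 *     ceil mode:  out = ceil((in + pad_before + pad_after - ksize) / stride) + 1
 *
 * "exclusive" then controls whether padded zeros are counted in the divisor
 * when averaging.
 */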

/**
*@brief Performs average pooling on the input.
*@par Inputs:
*x: A 5-D Tensor of shape [batch, depth, height, width, channels] and type float16, float32, double.
*@par Attributes:
*@li ksize: List of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor.
*@li strides: List of ints that has length 1, 3 or 5. The stride of the sliding window for each dimension of the input tensor.
*@li pads: List of ints, implicit zero paddings on both sides of the input.
*@li ceil_mode: When true, uses ceil instead of floor in the formula to compute the output shape.
*@li count_include_pad: When true, includes the zero-padding in the averaging calculation.
*@li divisor_override: If specified, it is used as the divisor; otherwise the size of the pooling region is used.
*@li data_format: A string, format of the input data . \n
*@par Outputs:
*y: The average pooled output tensor . \n
*@attention Constraints:
*@li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPool3D.
*/
REG_OP(AvgPool3D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, true)
    .ATTR(divisor_override, Int, 0)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(AvgPool3D)

/**
*@brief Performs average pooling on the input.
*@par Inputs:
*@li x: A 5-D Tensor of shape [batch, depth, height, width, channels] and type float16, float32, double.
*@li filter: An optional tensor of type float16, float32, double, fractal_z_3d layout.
*@li multiplier: An optional tensor of type float16, float32, double.
*@par Attributes:
*@li ksize: List of ints that has length 1, 3 or 5. The size of the window for each dimension of the input tensor.
*@li strides: List of ints that has length 1, 3 or 5. The stride of the sliding window for each dimension of the input tensor.
*@li pads: List of ints, implicit zero paddings on both sides of the input.
*@li ceil_mode: When true, uses ceil instead of floor in the formula to compute the output shape.
*@li count_include_pad: When true, includes the zero-padding in the averaging calculation.
*@li divisor_override: If specified, it is used as the divisor; otherwise the size of the pooling region is used.
*@li data_format: A string, format of the input data . \n
*@par Outputs:
*y: The average pooled output tensor . \n
*@attention Constraints:
*"ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPool3D.
*/
REG_OP(AvgPool3DD)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, true)
    .ATTR(divisor_override, Int, 0)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(AvgPool3DD)

/**
* @brief Computes the AvgPool3DGrad function.
* @par Inputs:
* @li orig_input_shape: An NDHWC tensor of type int32.
* @li grads: An NDHWC tensor of type float16, float32, or double.
* @par Attributes:
* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor.
* @li strides: List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor.
* @li pads: List of ints, implicit zero paddings on both sides of the input.
* @li ceil_mode: When true, uses ceil instead of floor in the formula to compute the output shape.
* @li count_include_pad: When true, includes the zero-padding in the averaging calculation.
* @li divisor_override: If specified, it is used as the divisor; otherwise the size of the pooling region is used.
* @li data_format: A string, format of the input data.
* @par Outputs:
* output: A mutable tensor; its shape is given by "orig_input_shape" and its type matches "grads".
* @attention Constraints:
* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator AvgPoolGrad.
*/
REG_OP(AvgPool3DGrad)
    .INPUT(orig_input_shape, TensorType({DT_INT32}))
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(output, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, true)
    .ATTR(divisor_override, Int, 0)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(AvgPool3DGrad)

/**
* @brief Computes gradients of the average pooling function.
* @par Inputs:
* @li grads: An NDHWC tensor of type float16.
* @li filter: An optional tensor of type float16, fractal_z_3d layout.
* @li multiplier: An optional tensor of type float16.
* @par Attributes:
* @li orig_input_shape: List of ints that has length 5. The shape of the original forward input tensor.
* @li ksize: List of ints that has length 5. The size of the window for each dimension of the input tensor.
* @li strides: List of ints that has length 5. The stride of the sliding window for each dimension of the input tensor.
* @li pads: List of ints, implicit zero paddings on both sides of the input.
* @li ceil_mode: When true, uses ceil instead of floor in the formula to compute the output shape.
* @li count_include_pad: When true, includes the zero-padding in the averaging calculation.
* @li divisor_override: If specified, it is used as the divisor; otherwise the size of the pooling region is used.
* @li data_format: A string, format of the input data . \n
* @par Outputs:
* output: The average pooling gradient tensor . \n
* @attention Constraints:
* "ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPool3DGradD.
*/
REG_OP(AvgPool3DGradD)
    .INPUT(grads, TensorType({DT_FLOAT16}))
    .OPTIONAL_INPUT(filter, TensorType({DT_FLOAT16}))
    .OPTIONAL_INPUT(multiplier, TensorType({DT_FLOAT16}))
    .OUTPUT(output, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(orig_input_shape, ListInt)
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, true)
    .ATTR(divisor_override, Int, 0)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(AvgPool3DGradD)
  282. /**
  283. *@brief Performs max_pool_ext2 on the input . \n
  284. *@par Inputs:
  285. * One input:
  286. *x: A Tensor of type float16.
  287. *@par Attributes:
  288. *@li ksize: A required list of int8, int16, int32, or int64 values,
  289. * specifying the size of the window for each dimension of the input tensor. No default value.
  290. *@li strides: A required list of int8, int16, int32, or int64 values,
  291. * specifying the stride of the sliding window for each dimension of the input tensor. No default value.
  292. *@li padding: A required string. No default value.
  293. *@li data_format: An optional string . \n
  294. *@par Outputs:
  295. *y: A Tensor. Has the same type and format as input "x" . \n
  296. *@attention Constraints:
  297. *@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255.
  298. *@li "stride is a list that has length 4: strides[0] = 1 or strides[3] = 1,
  299. * strides[1] <= 63, strides[0] >= 1, strides[2] <= 63, strides[2] >= 1.
  300. *@li "padding" is either "SAME" or "VALID" . \n
  301. *@par Third-party framework compatibility
  302. * Compatible with the TensorFlow operator MaxPoolV2.
  303. */
REG_OP(MaxPoolExt2)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT8,
                          DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT8,
                           DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                           DT_UINT16, DT_QINT8}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(MaxPoolExt2)

/**
*@brief Performs max pooling on the input . \n
*@par Inputs:
* One input:
*x: A Tensor. Supported types: float16, float32, double, int8, int16,
* int32, int64, uint8, uint16, qint8.
*@par Attributes:
*@li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
*@li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
*@li padding: A required string. No default value.
*@li data_format: An optional string. Defaults to "NHWC" . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1,
* ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1,
* strides[1] <= 63, strides[1] >= 1, strides[2] <= 63, strides[2] >= 1.
*@li "padding" is either "SAME" or "VALID".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPool.
*/
REG_OP(MaxPool)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT8,
                          DT_INT16, DT_INT32, DT_INT64, DT_UINT8,
                          DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT8,
                           DT_INT16, DT_INT32, DT_INT64, DT_UINT8, DT_UINT16, DT_QINT8}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(MaxPool)
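
/*
 * Minimal validity check for the documented MaxPool constraints (a sketch;
 * the helper name and its placement are illustrative, not part of this
 * header):
 *
 *     #include <cstdint>
 *     #include <vector>
 *     // NHWC layout assumed, so index 1 = H and index 2 = W; the header's
 *     // "ksize[0] = 1 or ksize[3] = 1" wording covers the NCHW alternative.
 *     inline bool MaxPoolAttrsValid(const std::vector<int64_t>& ksize,
 *                                   const std::vector<int64_t>& strides) {
 *       return ksize.size() == 4 && strides.size() == 4 &&
 *              ksize[0] == 1 && strides[0] == 1 &&      // batch dim untouched
 *              ksize[1] * ksize[2] <= 255 &&            // documented window limit
 *              strides[1] >= 1 && strides[1] <= 63 &&   // documented stride range
 *              strides[2] >= 1 && strides[2] <= 63;
 *     }
 */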

/**
*@brief Performs 3D max pooling on the input . \n
*@par Inputs:
*x: A Tensor. Supported types: float16, float32, double . \n
*@par Attributes:
*@li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
*@li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
*@li padding: A required string. No default value.
*@li pads: A list of int32 values. Default value {0,0,0,0,0,0}.
*@li dilation: A list of int32 values. Default value {1,1,1,1,1,1}.
*@li ceil_mode: An optional int32. Default value 0.
*@li data_format: An optional string. Defaults to "NDHWC" . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1,
* ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1,
* strides[1] <= 63, strides[1] >= 1, strides[2] <= 63, strides[2] >= 1.
*@li "padding" is either "SAME" or "VALID" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPool3D.
*/
REG_OP(MaxPool3D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(pads, ListInt, {0, 0, 0, 0, 0, 0})
    .ATTR(dilation, ListInt, {1, 1, 1, 1, 1, 1})
    .ATTR(ceil_mode, Int, 0)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(MaxPool3D)

/**
* @brief Performs 3D max pooling on the input and outputs both max values and indices.
*
* @par Inputs:
* One input:
* x: A 6D tensor. Supported type: float16. Format as NDC1HWC0.
* @par Attributes:
* @li ksize: A required list of int32 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
* @li strides: A required list of int32 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
* @li pads: A required 3*2-dimension list of int32 values,
* specifying the padding of the three spatial dimensions of the input, implemented with 0.
* @li dilation: Dilation of the kernel. Default value is {1,1,1,1,1}.
* @li ceil_mode: Default value is false.
* @li data_format: The format of the torch input. Default value is "NCDHW".
* @li argmax_type: Determines the type of the output argmax. "bitmask" is the
* default value; the argmax then returns an img2col bitmask. "index_int32" and
* "index_int64" return torch-style output indices.
* @par Outputs:
* @li y: A 6D tensor. The maxpool3d output (max values), format as NDoC1HoWoC0.
* @li argmax: A 5D uint16 tensor. The indices output.
*/
REG_OP(MaxPool3DWithArgmax)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OUTPUT(argmax, TensorType::IndexNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(dilation, ListInt, {1, 1, 1, 1, 1})
    .ATTR(ceil_mode, Bool, false)
    .ATTR(data_format, String, "NCDHW")
    .ATTR(argmax_type, String, "bitmask")
    .OP_END_FACTORY_REG(MaxPool3DWithArgmax)

/**
*@brief Applies a 2D adaptive max pooling over an input signal composed of several input planes. \n
* The output is of size H x W, for any input size.
* @par Inputs:
* One input, including:
* @li x: A Tensor. Must be one of the following data types:
* float16, float32, float64. \n
* @par Attributes:
* @li output_size: A required list of 2 ints,
* specifying the size (H, W) of the output tensor. \n
* @par Outputs:
* @li y: A Tensor. Has the same data type as "x". \n
* @li argmax: A Tensor of index type. The indices of the max values.
* @par Third-party framework compatibility
* Compatible with the PyTorch operator AdaptiveMaxPool2d.
*/
REG_OP(AdaptiveMaxPool2d)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(argmax, TensorType::IndexNumberType())
    .REQUIRED_ATTR(output_size, ListInt)
    .OP_END_FACTORY_REG(AdaptiveMaxPool2d)

/**
* @brief Computes second-order gradients of the maxpooling3d function . \n
* @par Inputs:
* @li orig_x: Original forward input tensor (NDC1HWC0) of type float16.
* @li orig_y: Original forward output tensor (NDC1HWC0) of type float16.
* @li grads: Gradient tensor (NDC1HWC0) of type float16.
* @par Attributes:
* @li ksize: A required list or tuple,
* specifying the size of the sliding window.
* @li strides: A required list or tuple,
* specifying the stride of the sliding window.
* @li pads: A required list or tuple.
* @li data_format: An optional string.
* Format of the original input, either NCDHW or NDHWC. Defaults to NDHWC . \n
* @attention Constraints:
* @li Only the Ascend 910 platform is supported.
* @li "orig_x" and "grads" must have the same shape.
* @li "orig_y" and "y" must have the same shape. Otherwise, an error is reported.
* @li "orig_x", "orig_y", "grads", and "y" must be NDC1HWC0 tensors . \n
* @par Outputs:
* @li y: Result tensor of type float16.
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator MaxPool3DGradGrad.
*/
REG_OP(MaxPool3DGradGrad)
    .INPUT(orig_x, TensorType::RealNumberType())
    .INPUT(orig_y, TensorType::RealNumberType())
    .INPUT(grads, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(MaxPool3DGradGrad)

/**
* @brief Computes gradients of the maxpooling function . \n
* @par Inputs:
* @li x1: A mutable tensor of type RealNumberType.
* @li x2: A mutable tensor of type RealNumberType.
* @li grad: A mutable tensor of type RealNumberType . \n
* @par Attributes:
* @li ksize: A required tuple or list, specifying the size of the window for
* each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of the sliding
* window for each dimension of the input tensor.
* @li padding: A required string, specifying the type of padding algorithm
* to use.
* @li data_format: An optional string, specifying the data format of the input
* and output data. Defaults to "NHWC" . \n
* @par Outputs:
* y: A mutable tensor. Has the same shape and type as "x1" . \n
* @attention Constraints:
* @li ksize is limited by the buffer with full tiling.
* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolGrad.
*/
REG_OP(MaxPoolGrad)
    .INPUT(x1, TensorType::RealNumberType())
    .INPUT(x2, TensorType::RealNumberType())
    .INPUT(grad, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(MaxPoolGrad)

/**
* @brief Computes second-order gradients of the maxpooling function . \n
* @par Inputs:
* @li x1: Original forward input tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* @li x2: Has the same type and format as input "x1".
* @li grad: Has the same type and format as input "x1" . \n
* @par Attributes:
* @li ksize: A required list or tuple,
* specifying the size of the sliding window.
* @li strides: A required list or tuple,
* specifying the stride of the sliding window.
* @li padding: A required string, window sliding mode. Either SAME or VALID.
* @li data_format: An optional string.
* Format of the original input, either NCHW or NHWC. Defaults to NHWC . \n
* @attention Constraints:
* @li Only the Ascend 910 platform is supported.
* @li "x1" and "grad" must have the same shape.
* @li "x2" and "y" must have the same shape. Otherwise, an error is reported.
* @li "x1", "x2", "grad", and "y" must be 5D tensors.
* @li ksize[H] and ksize[W] are in the range [1, 255].
* @li strides[H] and strides[W] are in the range [1, 63].
* @li Other dimensions of ksize and strides are 1 . \n
* @par Outputs:
* y: Has the same type and format as input "x1" . \n
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator MaxPoolGradGrad.
*/
REG_OP(MaxPoolGradGrad)
    .INPUT(x1, TensorType::RealNumberType())
    .INPUT(x2, TensorType::RealNumberType())
    .INPUT(grad, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(MaxPoolGradGrad)

/**
*@brief Performs max pooling on the input, with "ksize" and "strides" passed as input tensors . \n
*@par Inputs:
* Three inputs:
*@li x: A Tensor of type float16.
*@li ksize: A required tensor of int32 values,
* specifying the size of the window for each dimension of the input tensor. No default value.
*@li strides: A required tensor of int32 values,
* specifying the stride of the sliding window for each dimension of the input tensor. No default value.
*@par Attributes:
*@li padding: A required string. No default value.
*@li data_format: An optional string. \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1, strides[1] <= 63, strides[1] >= 1,
* strides[2] <= 63, strides[2] >= 1.
*@li "padding" is either "SAME" or "VALID" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolV2.
*/
REG_OP(MaxPoolV2)
    .INPUT(x, TensorType({DT_FLOAT16}))
    .INPUT(ksize, TensorType({DT_INT32}))
    .INPUT(strides, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(MaxPoolV2)

/**
*@brief Performs max pooling on the input and outputs both max values and
* indices . \n
*@par Inputs:
* One input:
* x: A 4D Tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* Must set the format; supported format list: ["NCHW", "NHWC"]. \n
*@par Attributes:
*@li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
*@li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
*@li padding: A required string. No default value.
*@li Targmax: An optional int with default value 7 . \n
*@par Outputs:
*@li y: A Tensor. Has the same type and format as input "x".
*@li argmax: A Tensor of index type. The indices of the max values.
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1,
* ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1,
* strides[1] <= 63, strides[1] >= 1, strides[2] <= 63, strides[2] >= 1.
*@li "padding" is either "SAME" or "VALID".
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolWithArgmax.
*/
REG_OP(MaxPoolWithArgmax)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OUTPUT(argmax, TensorType::IndexNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(Targmax, Int, 7)
    .OP_END_FACTORY_REG(MaxPoolWithArgmax)

/**
*@brief Performs the backpropagation of MaxPoolWithArgmax . \n
*@par Inputs:
* Three inputs, including:
*@li x: A 4D tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* Must set the format; supported format list: ["NCHW", "NHWC"].
*@li grad: A 4D tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* Must set the format; supported format list: ["NCHW", "NHWC"].
*@li argmax: A tensor of type int32 or int64 . \n
*@par Attributes:
*@li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
*@li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
*@li padding: A required string. No default value . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x" . \n
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1,
* ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1.
*@li "padding" is either "SAME" or "VALID".
*@see max_pool_with_argmax
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolGradWithArgmax.
*/
REG_OP(MaxPoolGradWithArgmax)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(grad, TensorType::RealNumberType())
    .INPUT(argmax, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(MaxPoolGradWithArgmax)
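
/*
 * Wiring sketch (illustrative): the forward MaxPoolWithArgmax produces both
 * "y" and "argmax"; the backward op consumes the original input, the incoming
 * gradient, and that same "argmax". Node names and attribute values here are
 * assumptions; the two-argument set_input_* form selects a named output of the
 * source operator:
 *
 *     ge::op::MaxPoolWithArgmax fwd("mp_fwd");
 *     fwd.set_input_x(x);
 *     fwd.set_attr_ksize({1, 2, 2, 1});
 *     fwd.set_attr_strides({1, 2, 2, 1});
 *     fwd.set_attr_padding("VALID");
 *
 *     ge::op::MaxPoolGradWithArgmax bwd("mp_bwd");
 *     bwd.set_input_x(x);
 *     bwd.set_input_grad(dy);                // dy: gradient w.r.t. y
 *     bwd.set_input_argmax(fwd, "argmax");   // reuse the forward indices
 *     bwd.set_attr_ksize({1, 2, 2, 1});
 *     bwd.set_attr_strides({1, 2, 2, 1});
 *     bwd.set_attr_padding("VALID");
 */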

/**
*@brief Transforms a max-pooling mask to argmax indices . \n
*@par Inputs:
* Two inputs:
*@li x: A Tensor of type float16.
*@li mask: A Tensor of type uint16 . \n
*@par Attributes:
*@li ksize: A required list of int8, int16, int32, or int64 values, specifying the size of the window for each dimension of the input tensor. No default value.
*@li strides: A required list of int8, int16, int32, or int64 values, specifying the stride of the sliding window for each dimension of the input tensor. No default value.
*@li padding: A required string. No default value.
*@li originshape: A required list of int8, int16, int32, or int64 values. No default value. \n
*@par Outputs:
*argmax: A Tensor of type int32 . \n
*@attention Constraints:
*@li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1, ksize[1] * ksize[2] <= 255.
*@li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1, strides[1] <= 63, strides[1] >= 1, strides[2] <= 63, strides[2] >= 1.
*@li "padding" is either "SAME" or "VALID" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Mask2Argmax.
*/
REG_OP(Mask2Argmax)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(mask, TensorType::IndexNumberType())
    .OUTPUT(argmax, TensorType::IndexNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .REQUIRED_ATTR(originshape, ListInt)
    .OP_END_FACTORY_REG(Mask2Argmax)

/**
* @brief Computes second-order gradients of the maxpooling function . \n
* @par Inputs:
* @li x: Original forward input tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* @li grad: Gradient tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* @li argmax: A tensor of type int32 or int64.
* @par Attributes:
* @li ksize: A required list, specifying the size of the sliding window.
* @li strides: A required list, specifying the stride of the sliding window.
* @li padding: A required string, window sliding mode. Either SAME or VALID.
* @par Outputs:
* y: Result tensor. Supported types: float, double, int32,
* uint8, int16, int8, int64, uint16, half, uint32, uint64.
* @attention Constraints:
* @li Only the cloud platform is supported.
* @li "x" and "grad" must have the same shape.
* @li The length of the shape of x, grad, argmax, y must be 5.
* @li The shape of argmax must be (fmap_n, fmap_c1, kernel_h * kernel_w,
* (shape_max_pool[2] * shape_max_pool[3] + 15) // 16 * 16, 1),
* or (fmap_n, fmap_c1, kernel_h * kernel_w,
* (shape_max_pool[2] * shape_max_pool[3] + 31) // 16, 16); otherwise an error is reported . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolGradGradWithArgmax.
*/
REG_OP(MaxPoolGradGradWithArgmax)
    .INPUT(x, TensorType::RealNumberType())
    .INPUT(grad, TensorType::RealNumberType())
    .INPUT(argmax, TensorType::IndexNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .OP_END_FACTORY_REG(MaxPoolGradGradWithArgmax)

/**
* @brief Computes the AvgPoolGrad function . \n
* @par Inputs:
* @li orig_input_shape: An NHWC tensor of type int32.
* @li input_grad: An NHWC tensor of type float16, float32, or double . \n
* @par Attributes:
* @li ksize: A required tuple or list, specifying the size of the window for
* each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of the sliding
* window for each dimension of the input tensor.
* @li padding: A required string, specifying the type of
* the padding algorithm to use.
* @li data_format: An optional string. Defaults to "NHWC" . \n
* @par Outputs:
* out_grad: A mutable tensor with the same shape and type as "orig_input" . \n
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator AvgPoolGrad.
*/
REG_OP(AvgPoolGrad)
    .INPUT(orig_input_shape, TensorType({DT_INT32}))
    .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(out_grad, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(AvgPoolGrad)
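
/*
 * Note (a sketch of the usual AvgPoolGrad calling convention, which this
 * header does not spell out): "orig_input_shape" is a 1-D int32 tensor
 * holding the shape of the original forward input, e.g. {N, H, W, C} for
 * NHWC, and "out_grad" is produced with exactly that shape and the type of
 * "input_grad".
 */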

/**
* @brief Computes gradients of the average pooling function . \n
* @par Inputs:
* @li input_grad: An NHWC tensor of type float16.
* @li mean_matrix: Assist matrix, an NHWC tensor of type float16.
* @li kernel_matrix: Assist matrix, an NHWC tensor of type float16.
* @par Attributes:
* @li orig_input_shape: A required list of ints, the dimensions of the original input.
* @li ksize: A required tuple or list, specifying the size of the window
* for each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of
* the sliding window for each dimension of the input tensor.
* @li padding: A required string, specifying the type of the padding algorithm
* to use.
* @li data_format: An optional string. Defaults to "NHWC" . \n
* @par Outputs:
* out_grad: A mutable tensor with the same shape and type as "orig_input".
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use AvgPoolGrad instead.
*/
REG_OP(AvgPoolGradD)
    .INPUT(input_grad, TensorType({DT_FLOAT16}))
    .INPUT(mean_matrix, TensorType({DT_FLOAT16}))
    .INPUT(kernel_matrix, TensorType({DT_FLOAT16}))
    .OUTPUT(out_grad, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(orig_input_shape, ListInt)
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(padding, String)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(AvgPoolGradD)

/**
* @brief Computes the AvgPoolV2Grad function.
* @par Inputs:
* @li orig_input_shape: An NHWC tensor of type int32.
* @li input_grad: An NHWC tensor of type float16, float32, or double.
* @par Attributes:
* @li ksize: A required tuple or list, specifying the size of the window for
* each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of the sliding
* window for each dimension of the input tensor.
* @li padding_mode: A required string, specifying the type of
* the padding algorithm to use.
* @li pads: Pad values used when padding_mode is "CALCULATED".
* @li global_pooling: Whether to use global pooling. If global_pooling = true,
* ksize and pads are ignored. Defaults to false.
* @li ceil_mode: Whether to use the ceil function to calculate the output
* height and width. Defaults to false.
* @li exclusive: Whether to exclude padding points. Defaults to true.
* @li data_format: An optional string. Defaults to "NCHW".
* @par Outputs:
* out_grad: A mutable tensor with the same shape and type as "orig_input".
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator AvgPoolGrad.
*/
REG_OP(AvgPoolV2Grad)
    .INPUT(orig_input_shape, TensorType({DT_INT32}))
    .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .OUTPUT(out_grad, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NCHW")
    .ATTR(global_pooling, Bool, false)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(exclusive, Bool, true)
    .OP_END_FACTORY_REG(AvgPoolV2Grad)

/**
* @brief Computes gradients of the AvgPoolV2 function.
* @par Inputs:
* @li input_grad: An NHWC tensor of type float16.
* @li mean_matrix: An optional assist matrix of type float16.
* @li kernel_matrix: An optional assist matrix of type float16.
* @par Attributes:
* @li orig_input_shape: A required tuple or list of type int32.
* @li ksize: A required tuple or list, specifying the size of the window for
* each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of the sliding
* window for each dimension of the input tensor.
* @li padding_mode: A required string, specifying the type of
* the padding algorithm to use.
* @li pads: Pad values used when padding_mode is "CALCULATED".
* @li global_pooling: Whether to use global pooling. If global_pooling = true,
* ksize and pads are ignored. Defaults to false.
* @li ceil_mode: Whether to use the ceil function to calculate the output
* height and width. Defaults to false.
* @li exclusive: Whether to exclude padding points. Defaults to true.
* @li data_format: An optional string. Defaults to "NCHW".
* @par Outputs:
* out_grad: A mutable tensor with the same shape and type as "orig_input".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator AvgPoolGrad.
*/
REG_OP(AvgPoolV2GradD)
    .INPUT(input_grad, TensorType({DT_FLOAT16}))
    .OPTIONAL_INPUT(mean_matrix, TensorType({DT_FLOAT16}))
    .OPTIONAL_INPUT(kernel_matrix, TensorType({DT_FLOAT16}))
    .OUTPUT(out_grad, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(orig_input_shape, ListInt)
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NCHW")
    .ATTR(global_pooling, Bool, false)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(exclusive, Bool, true)
    .OP_END_FACTORY_REG(AvgPoolV2GradD)

/**
*@brief Upsamples the input, similar to the nearest-neighbor scaling algorithm.
*@par Inputs:
* One input, including:
* x: A tensor of type float16 or float32.
*@par Attributes:
*@li scale: An optional float32, scale factor applied to x. Defaults to "1.0".
*@li stride_h: An optional int32, broadcast along the H axis. Defaults to "2".
*@li stride_w: An optional int32, broadcast along the W axis. Defaults to "2".
*@par Outputs:
*y: A tensor of type float16 or float32.
*/
REG_OP(Upsample)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(scale, Float, 1)
    .ATTR(stride_h, Int, 2)
    .ATTR(stride_w, Int, 2)
    .OP_END_FACTORY_REG(Upsample)
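
/*
 * Shape sketch (an assumption inferred from the attribute descriptions above,
 * not stated explicitly in this header): with an NCHW input of shape
 * (N, C, H, W), stride_h and stride_w broadcast each pixel and "scale"
 * rescales the values:
 *
 *     y[n, c, h, w] = scale * x[n, c, h / stride_h, w / stride_w]   // integer division
 *
 * so the output shape would be (N, C, H * stride_h, W * stride_w).
 */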

/**
*@brief Computes gradients of the FractionalMaxPool function . \n
*@par Inputs:
*Inputs include:
* @li orig_input: A Tensor. Must be one of the following types: float32, float64, int32, int64.
* @li orig_output: A Tensor. Must have the same type as orig_input.
* @li out_backprop: A Tensor. Must have the same type as orig_input.
* 4-D with shape [batch, height, width, channels].
* @li row_pooling_sequence: A Tensor of type int64.
* @li col_pooling_sequence: A Tensor of type int64 . \n
*@par Attributes:
*overlapping: An optional bool. Defaults to False . \n
*@par Outputs:
*y: A Tensor. Has the same type as orig_input . \n
*@attention Constraints:
*The implementation of FractionalMaxPoolGrad on Ascend uses AICPU, with poor performance.
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator FractionalMaxPoolGrad.
*/
REG_OP(FractionalMaxPoolGrad)
    .INPUT(orig_input, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .INPUT(orig_output, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .INPUT(out_backprop, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .INPUT(row_pooling_sequence, TensorType({DT_INT64}))
    .INPUT(col_pooling_sequence, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .ATTR(overlapping, Bool, false)
    .OP_END_FACTORY_REG(FractionalMaxPoolGrad)

/**
*@brief Performs fractional average pooling on the input . \n
*@par Inputs:
*Inputs include:
*x: A Tensor. Must be one of the following types: float32, float64, int32, int64.
* 4-D with shape [batch, height, width, channels] . \n
*@par Attributes:
*@li pooling_ratio: A list of floats that has length >= 4.
*@li pseudo_random: An optional bool. Defaults to False.
*@li overlapping: An optional bool. Defaults to False. When set to True, values at
* the boundary of adjacent pooling cells are used by both cells when pooling.
*@li deterministic: An optional bool. Defaults to False.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0 . \n
*@par Outputs:
*@li y: A Tensor. Has the same type as x.
*@li row_pooling_sequence: A Tensor of type int64.
*@li col_pooling_sequence: A Tensor of type int64 . \n
*@attention Constraints:
*The implementation of FractionalAvgPool on Ascend uses AICPU, with poor performance.
*@par Third-party framework compatibility
*@li Compatible with the TensorFlow operator FractionalAvgPool.
*/
  931. REG_OP(FractionalAvgPool)
  932. .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
  933. .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
  934. .OUTPUT(row_pooling_sequence, TensorType({DT_INT64}))
  935. .OUTPUT(col_pooling_sequence, TensorType({DT_INT64}))
  936. .ATTR(pooling_ratio, ListFloat, {})
  937. .ATTR(pseudo_random, Bool, false)
  938. .ATTR(overlapping, Bool, false)
  939. .ATTR(deterministic, Bool, false)
  940. .ATTR(seed, Int, 0)
  941. .ATTR(seed2, Int, 0)
  942. .OP_END_FACTORY_REG(FractionalAvgPool)

/**
*@brief Performs fractional max pooling on the input. \n
*@par Inputs:
*Inputs include:
*x: A Tensor. Must be one of the following types: float32, float64, int32, int64.
4-D with shape [batch, height, width, channels]. \n
*@par Attributes:
*@li pooling_ratio: A list of floats that has length >= 4. Pooling ratio for each dimension of x.
*@li pseudo_random: An optional bool. Defaults to False.
*@li overlapping: An optional bool. Defaults to False. When set to True, the values at the boundary of adjacent pooling cells are used by both cells.
*@li deterministic: An optional bool. Defaults to False.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0. \n
*@par Outputs:
*@li y: A Tensor. Has the same type as x.
*@li row_pooling_sequence: A Tensor of type int64.
*@li col_pooling_sequence: A Tensor of type int64. \n
*@attention Constraints:
*The implementation of FractionalMaxPool on Ascend uses AICPU, which has poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FractionalMaxPool.
*/
REG_OP(FractionalMaxPool)
    .INPUT(x, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .OUTPUT(row_pooling_sequence, TensorType({DT_INT64}))
    .OUTPUT(col_pooling_sequence, TensorType({DT_INT64}))
    .ATTR(pooling_ratio, ListFloat, {})
    .ATTR(pseudo_random, Bool, false)
    .ATTR(overlapping, Bool, false)
    .ATTR(deterministic, Bool, false)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(FractionalMaxPool)
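
// Example: a minimal sketch of how fractional pooling partitions one spatial
// dimension, assuming the deterministic rule boundary[i] = floor(i * ratio)
// (the kernel draws the boundaries (pseudo-)randomly; this only illustrates
// what pooling_ratio and row/col_pooling_sequence mean). Cell i covers
// [seq[i], seq[i+1]), or [seq[i], seq[i+1]] when "overlapping" is true.
#include <cmath>
#include <cstdint>
#include <vector>
std::vector<int64_t> FractionalPoolBoundariesExample(int64_t in_len, float ratio) {
  int64_t out_len = static_cast<int64_t>(std::floor(in_len / ratio));
  std::vector<int64_t> seq(out_len + 1);
  for (int64_t i = 0; i <= out_len; ++i) {
    seq[i] = static_cast<int64_t>(std::floor(i * ratio));
  }
  seq[out_len] = in_len;  // the last cell always ends at the input edge
  return seq;
}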

/**
*@brief Finds values of the n-th order statistic for the last dimension. \n
*@par Inputs:
*Inputs include:
* @li x: A Tensor. Must be one of the following types: float16, float32, float64,
int8, int16, int32, int64, uint8, uint16.
* @li n: A 0-D Tensor of type int32. \n
*@par Attributes:
*reverse: An optional bool. Defaults to False. When set to True, finds the n-th largest value instead of the n-th smallest. \n
*@par Outputs:
*y: A Tensor. Has the same type as x. \n
*@attention Constraints:
*The implementation of NthElement on Ascend uses AICPU, which has poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator NthElement.
*/
REG_OP(NthElement)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
                          DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .INPUT(n, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT8, DT_INT16,
                           DT_UINT16, DT_UINT8, DT_INT32, DT_INT64, DT_DOUBLE}))
    .ATTR(reverse, Bool, false)
    .OP_END_FACTORY_REG(NthElement)
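
// Example: reference semantics of NthElement on a single row (the last
// dimension), using std::nth_element. A standalone sketch, not the AICPU
// implementation; "reverse" selects the n-th largest instead of the n-th
// smallest value.
#include <algorithm>
#include <functional>
#include <vector>
float NthElementRowExample(std::vector<float> row, int n, bool reverse) {
  if (reverse) {
    std::nth_element(row.begin(), row.begin() + n, row.end(), std::greater<float>());
  } else {
    std::nth_element(row.begin(), row.begin() + n, row.end());
  }
  return row[n];  // the n-th order statistic of this row
}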

/**
*@brief Computes gradients of the FractionalAvgPool function. \n
*@par Inputs:
*Inputs include:
* @li orig_input_tensor_shape: A Tensor of type int64.
* @li out_backprop: A Tensor. Must be one of the following types: float32, float64,
int32, int64. 4-D with shape [batch, height, width, channels].
* @li row_pooling_sequence: A Tensor of type int64.
* @li col_pooling_sequence: A Tensor of type int64. \n
*@par Attributes:
*overlapping: An optional bool. Defaults to False. \n
*@par Outputs:
*y: A Tensor. Has the same type as out_backprop. \n
*@attention Constraints:
*The implementation of FractionalAvgPoolGrad on Ascend uses AICPU, which has poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FractionalAvgPoolGrad.
*/
REG_OP(FractionalAvgPoolGrad)
    .INPUT(orig_input_tensor_shape, TensorType({DT_INT64}))
    .INPUT(out_backprop, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .INPUT(row_pooling_sequence, TensorType({DT_INT64}))
    .INPUT(col_pooling_sequence, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64}))
    .ATTR(overlapping, Bool, false)
    .OP_END_FACTORY_REG(FractionalAvgPoolGrad)

/**
*@brief Returns the permuted vector/tensor in the destination data format, given the one in the source data format. \n
*@par Inputs:
*Inputs include:
*x: A Tensor. Must be one of the following types: int32, int64. Vector of size 4
or Tensor of shape (4, 2) in source data format. \n
*@par Attributes:
*@li src_format: An optional string. Defaults to "NHWC". Source data format.
*@li dst_format: An optional string. Defaults to "NCHW". Destination data format. \n
*@par Outputs:
*y: A Tensor. Has the same type as x. \n
*@attention Constraints:
*The implementation of DataFormatVecPermute on Ascend uses AICPU, which has poor performance.
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator DataFormatVecPermute.
*/
REG_OP(DataFormatVecPermute)
    .INPUT(x, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(src_format, String, "NHWC")
    .ATTR(dst_format, String, "NCHW")
    .OP_END_FACTORY_REG(DataFormatVecPermute)
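
// Example: what DataFormatVecPermute does to a size-4 vector. Output element
// k takes the input element whose position in src_format matches the axis
// letter dst_format[k]. A standalone sketch of the semantics.
#include <array>
#include <cstdint>
#include <string>
std::array<int64_t, 4> DataFormatVecPermuteExample(const std::array<int64_t, 4> &x,
                                                   const std::string &src_format,
                                                   const std::string &dst_format) {
  std::array<int64_t, 4> y{};
  for (size_t k = 0; k < 4; ++k) {
    y[k] = x[src_format.find(dst_format[k])];  // locate this axis in the source format
  }
  return y;
}
// DataFormatVecPermuteExample({2, 224, 224, 3}, "NHWC", "NCHW") yields {2, 3, 224, 224}.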

/**
* @brief Computes gradients of the MaxPool3D function. \n
* @par Inputs:
* @li orig_x: A mutable NDC1HWC0 tensor of type float16.
* @li orig_y: A mutable NDC1HWC0 tensor of type float16.
* @li grads: A mutable NDC1HWC0 tensor of type float16. \n
* @par Attributes:
* @li ksize: A required tuple or list, specifying the size of the window for
* each dimension of the input tensor.
* @li strides: A required tuple or list, specifying the stride of the sliding
* window for each dimension of the input tensor.
* @li padding: An optional string. Defaults to "SAME".
* @li pads: A required list of 6 ints. Supports only padding along the D,
* H and W dimensions, in the sequence head, tail, top, bottom, left and right.
* @li data_format: An optional string, specifying the data format of the input and
* output data. Defaults to "NDHWC". \n
* @par Outputs:
* y: A mutable tensor. Has the same shape as "orig_x", but type is float32. \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPool3DGrad.
*/
REG_OP(MaxPool3DGrad)
    .INPUT(orig_x, TensorType::RealNumberType())
    .INPUT(orig_y, TensorType::RealNumberType())
    .INPUT(grads, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding, String, "SAME")
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(data_format, String, "NDHWC")
    .OP_END_FACTORY_REG(MaxPool3DGrad)

/**
*@brief Performs AvgPool1D on the input. \n
*@par Inputs:
*x: A Tensor. Must be one of the following types: int8, uint8, int16, int32, int64, float16, float32, float64. \n
*@par Attributes:
*@li ksize: A required int, specifying the size of the window.
*@li strides: A required int, specifying the stride of the window.
*@li pads: A required tuple or list, specifying the padding on both sides.
*@li ceil_mode: An optional bool. Defaults to False.
*@li count_include_pad: An optional bool. Defaults to False. \n
*@par Outputs:
*y: A Tensor. Has the same type as x. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator AvgPool1D.
*/
REG_OP(AvgPool1D)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, Int)
    .REQUIRED_ATTR(strides, Int)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, false)
    .OP_END_FACTORY_REG(AvgPool1D)
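
// Example: the output-length rule implied by the attributes above, assuming
// the usual PyTorch convention (a sketch, not the kernel): pads gives the
// padding added on each side, and ceil_mode switches floor to ceil.
#include <cmath>
int AvgPool1DOutLenExample(int in_len, int ksize, int strides,
                           int pad_l, int pad_r, bool ceil_mode) {
  double span = (in_len + pad_l + pad_r - ksize) / static_cast<double>(strides) + 1;
  return static_cast<int>(ceil_mode ? std::ceil(span) : std::floor(span));
}
// With count_include_pad = false, each window is divided by the number of real
// (non-pad) elements it covers; with true, the divisor is always ksize.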

/**
*@brief Performs AvgPool1D on the input. \n
*@par Inputs:
*@li x: A Tensor. Must be one of the following types: int8, uint8, int16, int32, int64, float16, float32, float64.
*@li assist_matrix: An auxiliary coefficient tensor. Must have the same type as x. \n
*@par Attributes:
*@li ksize: A required int, specifying the size of the window.
*@li strides: A required int, specifying the stride of the window.
*@li pads: A required tuple or list, specifying the padding on both sides.
*@li ceil_mode: An optional bool. Defaults to False.
*@li count_include_pad: An optional bool. Defaults to False. \n
*@par Outputs:
*y: A Tensor. Has the same type as x. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator AvgPool1D.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use AvgPool1D instead.
*/
REG_OP(AvgPool1DD)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(assist_matrix, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(ksize, Int)
    .REQUIRED_ATTR(strides, Int)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(ceil_mode, Bool, false)
    .ATTR(count_include_pad, Bool, false)
    .OP_END_FACTORY_REG(AvgPool1DD)

/**
* @brief Performs max pooling on the input and outputs both max values and indices. \n
* @par Inputs:
* One input:
* x: A 5HD Tensor of type float16.
* The format must be set; the supported format list is ["NC1HWC0"].
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor. No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of the input tensor. No default value.
* @li pads: A required list of int8, int16, int32, or int64 values,
* specifying the pad of the input feature map. No default value. \n
* @li dtype: An optional int. Defaults to 3.
* @li dilation: An optional list of int8, int16, int32, or int64 values.
* @li ceil_mode: An optional bool. Defaults to False. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x".
* argmax: A Tensor of type uint16.
* @attention Constraints:
* @li ksize: a list that has length 4:
* ksize[0] = 1, ksize[1] = 1, ksize[2] * ksize[3] <= (ub_size-8)*1024//6//2//16.
* @li strides: a list that has length 4:
* strides[0] = 1, strides[1] = 1, 1 <= strides[2] <= 2048, 1 <= strides[3] <= 2048.
* @li pads: a list that has length 4:
* pads[0] = 1, pads[1] = 1, 1 <= pads[2] <= (ksize[2]//2), 1 <= pads[3] <= (ksize[3]//2).
* @li dilation: a list that has length 4.
* @li ceil_mode: a bool, defaults to False. \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator max_pool2d_with_indices.
*/
REG_OP(MaxPoolWithArgmaxV2)
    .INPUT(x, TensorType({DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16}))
    .OUTPUT(argmax, TensorType({DT_UINT16}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(dtype, Int, 3)
    .ATTR(dilation, ListInt, {1, 1, 1, 1})
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolWithArgmaxV2)
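
// Example: checking the ksize constraint above for a given Unified Buffer
// size. "ub_size" is in KB and is device dependent; whatever value you pass
// in is an assumption of this sketch, not something this header defines.
#include <cstdint>
bool MaxPoolWithArgmaxV2KsizeFits(int64_t ksize_h, int64_t ksize_w, int64_t ub_size) {
  // ksize[2] * ksize[3] <= (ub_size - 8) * 1024 // 6 // 2 // 16
  const int64_t limit = (ub_size - 8) * 1024 / 6 / 2 / 16;
  return ksize_h * ksize_w <= limit;
}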

/**
* @brief Performs the backpropagation of MaxPoolWithArgmaxV2. \n
* @par Inputs:
* Three inputs, including:
* @li x: A 5HD tensor of type float16.
* The format must be set; the supported format list is ["NC1HWC0"].
* @li grad: A 5HD tensor of type float16.
* The format must be set; the supported format list is ["NC1HWC0"].
* @li argmax: A 5HD tensor of type uint16 or int64.
* The format must be set; the supported format list is ["NC1HWC0"]. \n
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor. No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of the input tensor. No default value.
* @li pads: A required list of int8, int16, int32, or int64 values,
* specifying the pad of the input feature map. No default value. \n
* @li dtype: An optional int. Defaults to 3.
* @li dilation: An optional list of int8, int16, int32, or int64 values.
* @li ceil_mode: An optional bool. Defaults to False. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x". \n
* @attention Constraints:
* @li ksize: a list that has length 4:
* ksize[0] = 1, ksize[1] = 1, ksize[2] * ksize[3] <= (ub_size-8)*1024//7//2//16.
* @li strides: a list that has length 4:
* strides[0] = 1, strides[1] = 1, 1 <= strides[2] <= 2048, 1 <= strides[3] <= 2048.
* @li pads: a list that has length 4:
* pads[0] = 1, pads[1] = 1, 1 <= pads[2] <= (ksize[2]//2), 1 <= pads[3] <= (ksize[3]//2).
* @li dilation: a list that has length 4.
* @li ceil_mode: a bool, defaults to False. \n
* @see max_pool_grad_with_argmaxv2
* @par Third-party framework compatibility
* Compatible with the PyTorch backward operator of max_pool2d_with_indices.
*/
REG_OP(MaxPoolGradWithArgmaxV2)
    .INPUT(x, TensorType({DT_FLOAT16}))
    .INPUT(grad, TensorType({DT_FLOAT16}))
    .INPUT(argmax, TensorType({DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(dtype, Int, 3)
    .ATTR(dilation, ListInt, {1, 1, 1, 1})
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolGradWithArgmaxV2)

/**
* @brief Performs max pooling on the input. \n
* @par Inputs:
* One input:
* x: A Tensor. Must be one of the following types: float16, float32, double,
* int32, int64, uint8, int16, int8, uint16, qint8.
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
* @li padding_mode: An optional string. Defaults to "CALCULATED".
* @li pads: An optional list of int8, int16, int32, or int64 values,
* used to calculate the padding when padding_mode is "CALCULATED".
* Defaults to {0, 0, 0, 0}.
* @li data_format: An optional string. Defaults to "NCHW".
* @li global_pooling: An optional bool. Whether to use global pooling.
* If global_pooling is true, the kernel size and paddings are ignored.
* Defaults to False.
* @li ceil_mode: An optional bool. Whether to use the ceil function to
* calculate output height and width. If set to False, the floor function
* is used. Defaults to False. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x". \n
* @attention Constraints:
* @li "ksize" is a list that has length 4: ksize[0] = 1 or ksize[3] = 1,
* ksize[1] * ksize[2] <= 255.
* @li "strides" is a list that has length 4: strides[0] = 1 or strides[3] = 1,
* 1 <= strides[1] <= 63, 1 <= strides[2] <= 63.
* @li "padding_mode" is "SAME", "VALID" or "CALCULATED".
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPool.
*/
REG_OP(MaxPoolV3)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT32, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16, DT_QINT8}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NCHW")
    .ATTR(global_pooling, Bool, false)
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolV3)
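
// Example: the per-dimension output-size rule for the three padding modes,
// assuming the usual TensorFlow conventions for "SAME"/"VALID" and explicit
// pads plus ceil_mode for "CALCULATED". A sketch, not the kernel.
#include <cmath>
#include <string>
int MaxPoolV3OutDimExample(int in, int ksize, int stride,
                           const std::string &padding_mode,
                           int pad_before, int pad_after, bool ceil_mode) {
  if (padding_mode == "SAME") return (in + stride - 1) / stride;   // ceil(in / stride)
  if (padding_mode == "VALID") return (in - ksize) / stride + 1;   // floor((in - ksize) / stride) + 1
  // "CALCULATED": use the explicit pads, with ceil or floor per ceil_mode.
  double span = (in + pad_before + pad_after - ksize) / static_cast<double>(stride) + 1;
  return static_cast<int>(ceil_mode ? std::ceil(span) : std::floor(span));
}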

/**
* @brief Computes gradients of the maxpooling function. \n
* @par Inputs:
* @li orig_input: A mutable tensor of type RealNumberType.
* @li orig_output: A mutable tensor of type RealNumberType.
* @li grad: A mutable tensor of type RealNumberType. \n
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor.
* No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of
* the input tensor. No default value.
* @li padding_mode: An optional string. Defaults to "CALCULATED".
* @li pads: An optional list of int8, int16, int32, or int64 values,
* used to calculate the padding when padding_mode is "CALCULATED".
* Defaults to {0, 0, 0, 0}.
* @li data_format: An optional string. Defaults to "NCHW".
* @li global_pooling: An optional bool. Whether to use global pooling.
* If global_pooling is true, the kernel size and paddings are ignored.
* Defaults to False.
* @li ceil_mode: An optional bool. Whether to use the ceil function to
* calculate output height and width. If set to False, the floor function
* is used. Defaults to False. \n
* @par Outputs:
* out_grad: A mutable tensor. Has the same shape and type as "orig_input". \n
* @attention Constraints:
* @li Computing gradients of global pooling is not supported, which means
* "ksize" must be smaller than the spatial size of "orig_input".
* @li "ksize" is in the range [1, 255]. "strides" is in the range [1, 63].
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator MaxPoolGrad.
*/
REG_OP(MaxPoolV3Grad)
    .INPUT(orig_input, TensorType::RealNumberType())
    .INPUT(orig_output, TensorType::RealNumberType())
    .INPUT(grad, TensorType::RealNumberType())
    .OUTPUT(out_grad, TensorType::RealNumberType())
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .ATTR(padding_mode, String, "CALCULATED")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(data_format, String, "NCHW")
    .ATTR(global_pooling, Bool, false)
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolV3Grad)

/**
*@brief Performs Dilation2D on the input. \n
*@par Inputs:
*@li x: A 4-D tensor. The supported format is NHWC.
*@li filter: A 3-D tensor with the same type as "x"; its C dimension must be the same as that of "x". \n
*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1.
*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
*@li padding_mode: An optional string. Defaults to "SAME". Supported values: "SAME" and "VALID".
*@li pads: An optional list of 4 ints.
*@li ceil_mode: An optional bool. Defaults to "false". Uses ceil or floor to calculate the output size when padding_mode is "CALCULATED".
*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n
*@par Outputs:
*y: The output tensor. Has the same type and format as input "x". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Dilation2D.
*/
REG_OP(Dilation2D)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .INPUT(filter, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .ATTR(padding_mode, String, "SAME")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(ceil_mode, Bool, false)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(Dilation2D)
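
// Example: the value Dilation2D computes at one output location for a single
// batch/channel slice (grayscale morphological dilation): the maximum of
// input + filter over the window, sampled at the given rate. A VALID-padding
// sketch of the semantics, not the kernel.
#include <algorithm>
#include <limits>
#include <vector>
float Dilation2DAtExample(const std::vector<std::vector<float>> &x,
                          const std::vector<std::vector<float>> &filter,
                          int out_i, int out_j, int stride, int rate) {
  float best = -std::numeric_limits<float>::infinity();
  for (int di = 0; di < static_cast<int>(filter.size()); ++di) {
    for (int dj = 0; dj < static_cast<int>(filter[0].size()); ++dj) {
      best = std::max(best, x[out_i * stride + di * rate][out_j * stride + dj * rate]
                                + filter[di][dj]);
    }
  }
  return best;
}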

/**
*@brief Performs Dilation2DBackpropFilter on the input. \n
*@par Inputs:
*@li x: A 4-D tensor. The supported format is NHWC.
*@li filter: A 3-D tensor with the same type as "x"; its C dimension must be the same as that of "x".
*@li out_backprop: Has the same type and format as input "x"; its C dimension must be the same as that of "x". \n
*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1.
*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
*@li padding_mode: An optional string. Defaults to "SAME". Supported values: "SAME" and "VALID".
*@li pads: An optional list of 4 ints.
*@li ceil_mode: An optional bool. Defaults to "false". Uses ceil or floor to calculate the output size when padding_mode is "CALCULATED".
*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n
*@par Outputs:
*y: The output tensor. Has the same type and format as input "filter". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Dilation2DBackpropFilter.
*/
REG_OP(Dilation2DBackpropFilter)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .INPUT(filter,
           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .INPUT(out_backprop,
           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .OUTPUT(y,
            TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .ATTR(padding_mode, String, "SAME")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(ceil_mode, Bool, false)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(Dilation2DBackpropFilter)

/**
*@brief Performs Dilation2DBackpropInput on the input. \n
*@par Inputs:
*@li x: A 4-D tensor. The supported format is NHWC.
*@li filter: A 3-D tensor with the same type as "x"; its C dimension must be the same as that of "x".
*@li out_backprop: Has the same type and format as input "x"; its C dimension must be the same as that of "x". \n
*@par Attributes:
*@li strides: A required list of 4 ints, specifying the stride of the sliding window. The strides of the N and C dimensions are 1.
*@li rates: A required list of 4 ints. The rates of the N and C dimensions are 1.
*@li padding_mode: An optional string. Defaults to "SAME". Supported values: "SAME" and "VALID".
*@li pads: An optional list of 4 ints.
*@li ceil_mode: An optional bool. Defaults to "false". Uses ceil or floor to calculate the output size when padding_mode is "CALCULATED".
*@li data_format: An optional string, specifying the data format of "rates" and "strides", either "NCHW" or "NHWC" (default). \n
*@par Outputs:
*y: The output tensor. Has the same type and format as input "x". \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Dilation2DBackpropInput.
*/
REG_OP(Dilation2DBackpropInput)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .INPUT(filter,
           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .INPUT(out_backprop,
           TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .OUTPUT(y,
            TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_INT32, DT_INT64, DT_UINT8, DT_INT16, DT_INT8, DT_UINT16}))
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(rates, ListInt)
    .ATTR(padding_mode, String, "SAME")
    .ATTR(pads, ListInt, {0, 0, 0, 0})
    .ATTR(ceil_mode, Bool, false)
    .ATTR(data_format, String, "NHWC")
    .OP_END_FACTORY_REG(Dilation2DBackpropInput)

/**
* @brief Applies a 2D adaptive average pooling over
* an input signal composed of several input planes. \n
* @par Inputs:
* One input, including:
* @li x: A Tensor. Must be one of the following data types:
* float16, float32. \n
* @par Attributes:
* @li output_size: A required list of 2 ints,
* specifying the size (H, W) of the output tensor. \n
* @par Outputs:
* @li y: A Tensor. Has the same data type as "x". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator AdaptiveAvgPool2d.
*/
REG_OP(AdaptiveAvgPool2d)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(output_size, ListInt)
    .OP_END_FACTORY_REG(AdaptiveAvgPool2d)
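
// Example: how adaptive average pooling splits an extent of size "in" into
// "out" bins, assuming the PyTorch rule (a sketch of the semantics): bin i
// averages the input over [floor(i*in/out), ceil((i+1)*in/out)).
#include <cstdint>
struct AdaptiveBinExample { int64_t start; int64_t end; };
AdaptiveBinExample AdaptiveAvgPool2dBin(int64_t i, int64_t in, int64_t out) {
  AdaptiveBinExample b;
  b.start = (i * in) / out;                  // floor(i * in / out)
  b.end = ((i + 1) * in + out - 1) / out;    // ceil((i + 1) * in / out)
  return b;
}
// For in = 5, out = 3 the bins are [0, 2), [1, 4) and [3, 5).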

/**
* @brief Computes gradients of the adaptive average pooling function.
* @par Inputs:
* @li input_grad: A Tensor. Must be one of the following data types:
* float16, float32.
* @par Attributes:
* @li orig_input_shape: A required tuple or list of type int32.
* @par Outputs:
* @li output_grad: A tensor with the same type as "input_grad".
* @par Third-party framework compatibility
* Compatible with the PyTorch operator AdaptiveAvgPool2dGrad.
*/
REG_OP(AdaptiveAvgPool2dGrad)
    .INPUT(input_grad, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(output_grad, TensorType({DT_FLOAT, DT_FLOAT16}))
    .REQUIRED_ATTR(orig_input_shape, ListInt)
    .OP_END_FACTORY_REG(AdaptiveAvgPool2dGrad)

/**
* @brief Performs the backpropagation of MaxPoolWithArgmaxV1.
* @par Inputs:
* Three inputs, including:
* @li x: A tensor of type float16.
* @li grad: A tensor of type float16.
* @li argmax: A tensor of type uint16 or int64. \n
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor. No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of the input tensor. No default value.
* @li pads: A required list of int8, int16, int32, or int64 values,
* specifying the pad of the input feature map. No default value.
* @li dtype: An optional int. Defaults to 3.
* @li dilation: An optional list of int8, int16, int32, or int64 values. Defaults to {1, 1, 1, 1}.
* @li ceil_mode: An optional bool. Defaults to False. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x". \n
* @attention Constraints:
* @li The MaxPoolGradWithArgmaxV2 operator has the same function; it is recommended to use the V2 operator.
* @li ksize: a list that has length 4:
* ksize[0] = 1, ksize[3] = 1, ksize[1] * ksize[2] <= (ub_size-8)*1024//7//2//16.
* @li strides: a list that has length 4:
* strides[0] = 1, strides[3] = 1, 1 <= strides[1] <= 2048, 1 <= strides[2] <= 2048.
* @li pads: a list that has length 4:
* pads[0] = 1, pads[3] = 1, 1 <= pads[1] <= (ksize[1]//2), 1 <= pads[2] <= (ksize[2]//2).
* @li ceil_mode: defaults to False. \n
* @par Third-party framework compatibility
* Compatible with the PyTorch backward operator of max_pool2d_with_indices.
*/
REG_OP(MaxPoolGradWithArgmaxV1)
    .INPUT(x, TensorType({DT_FLOAT16}))
    .INPUT(grad, TensorType({DT_FLOAT16}))
    .INPUT(argmax, TensorType({DT_UINT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(dtype, Int, 3)
    .ATTR(dilation, ListInt, {1, 1, 1, 1})
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolGradWithArgmaxV1)

/**
* @brief Performs max pooling on the input and outputs both max values and indices.
* @par Inputs:
* One input:
* x: A Tensor of type float16. \n
* @par Attributes:
* @li ksize: A required list of int8, int16, int32, or int64 values,
* specifying the size of the window for each dimension of the input tensor. No default value.
* @li strides: A required list of int8, int16, int32, or int64 values,
* specifying the stride of the sliding window for each dimension of the input tensor. No default value.
* @li pads: A required list of int8, int16, int32, or int64 values,
* specifying the pad of the input feature map. No default value.
* @li dtype: An optional int. Defaults to 3.
* @li dilation: An optional list of int8, int16, int32, or int64 values. Defaults to {1, 1, 1, 1}.
* @li ceil_mode: An optional bool. Defaults to False. \n
* @par Outputs:
* y: A Tensor. Has the same type and format as input "x".
* argmax: A Tensor of type uint16. \n
* @attention Constraints:
* @li The MaxPoolWithArgmaxV2 operator has the same function; it is recommended to use the V2 operator.
* @li ksize: a list that has length 4:
* ksize[0] = 1, ksize[3] = 1, ksize[1] * ksize[2] <= (ub_size-8)*1024//6//2//16.
* @li strides: a list that has length 4:
* strides[0] = 1, strides[3] = 1, 1 <= strides[1] <= 2048, 1 <= strides[2] <= 2048.
* @li pads: a list that has length 4:
* pads[0] = 1, pads[3] = 1, 1 <= pads[1] <= (ksize[1]//2), 1 <= pads[2] <= (ksize[2]//2).
* @li ceil_mode: defaults to False.
* @par Third-party framework compatibility
* Compatible with the PyTorch operator max_pool2d_with_indices.
*/
REG_OP(MaxPoolWithArgmaxV1)
    .INPUT(x, TensorType({DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT16}))
    .OUTPUT(argmax, TensorType({DT_UINT16}))
    .REQUIRED_ATTR(ksize, ListInt)
    .REQUIRED_ATTR(strides, ListInt)
    .REQUIRED_ATTR(pads, ListInt)
    .ATTR(dtype, Int, 3)
    .ATTR(dilation, ListInt, {1, 1, 1, 1})
    .ATTR(ceil_mode, Bool, false)
    .OP_END_FACTORY_REG(MaxPoolWithArgmaxV1)

/**
*@brief Randomly samples a subset of positive and negative examples, and overwrites
the label vector with the ignore value (-1) for all elements that are not
included in the sample. \n
* @par Inputs:
* One input:
* labels: A label vector of shape (N, ). \n
* @par Attributes:
* @li batch_size_per_images: A required attribute of type int.
* @li positive_fraction: A required attribute of type float.
*@par Outputs:
*y: The result of SubSample. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator SubSample.
*@attention Constraints:
*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly.
*/
REG_OP(SubSample)
    .INPUT(labels, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(batch_size_per_images, Int)
    .REQUIRED_ATTR(positive_fraction, Float)
    .OP_END_FACTORY_REG(SubSample)
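
// Example: a behavioural sketch of SubSample inferred from the description
// above (the real operator samples randomly; this greedy version only shows
// the quota logic, and the positive/negative label convention is assumed).
// Positives are labels > 0, negatives are labels == 0, and everything outside
// the sample is overwritten with the ignore value -1.
#include <cstdint>
#include <vector>
void SubSampleExample(std::vector<int32_t> &labels,
                      int batch_size_per_images, float positive_fraction) {
  int pos_quota = static_cast<int>(batch_size_per_images * positive_fraction);
  int neg_quota = batch_size_per_images - pos_quota;
  for (int32_t &label : labels) {
    if (label > 0 && pos_quota > 0) { --pos_quota; }        // keep this positive
    else if (label == 0 && neg_quota > 0) { --neg_quota; }  // keep this negative
    else { label = -1; }                                    // ignore the rest
  }
}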

/**
*@brief Randomly samples a subset of positive and negative examples, and overwrites
the label vector with the ignore value (-1) for all elements that are not
included in the sample. \n
* @par Inputs:
* Two inputs, including:
* @li labels: A label vector of shape (N, ).
* @li shuffle_matrix: A random matrix of shape (N, ). \n
* @par Attributes:
* @li batch_size_per_images: A required attribute of type int.
* @li positive_fraction: A required attribute of type float.
*@par Outputs:
*y: The result of SubSampleLabels. \n
*@par Third-party framework compatibility
*Compatible with the PyTorch operator SubSampleLabels.
*@attention Constraints:
*Warning: This operator can be integrated only by MaskRcnn. Please do not use it directly.
*/
REG_OP(SubSampleLabels)
    .INPUT(labels, TensorType({DT_INT32}))
    .INPUT(shuffle_matrix, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .REQUIRED_ATTR(batch_size_per_images, Int)
    .REQUIRED_ATTR(positive_fraction, Float)
    .OP_END_FACTORY_REG(SubSampleLabels)

/**
*@brief Computes GlobalLpPool. GlobalLpPool consumes an input tensor X and applies Lp pooling across
the values in the same channel. \n
*@par Inputs:
* x: A Tensor of type float16 or float32. \n
*@par Attributes:
*@li p: An optional float32, the order of the norm. Defaults to 2.0. \n
*@par Outputs:
* y: A Tensor. Has the same type as "x". When the shape of x is [N,C,H,W], the shape of y is [N,C,1,1].
*@par Third-party framework compatibility
* Compatible with the ONNX operator GlobalLpPool.
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED.
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(GlobalLpPool)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(p, Float, 2.0)
    .OP_END_FACTORY_REG(GlobalLpPool)
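
// Example: the formula GlobalLpPool applies to each channel,
// y = (sum_i |x_i|^p)^(1/p) over all spatial positions of that channel, so an
// [N,C,H,W] input becomes [N,C,1,1]. A standalone sketch, not the kernel.
#include <cmath>
#include <vector>
float GlobalLpPoolChannelExample(const std::vector<float> &channel, float p) {
  double acc = 0.0;
  for (float v : channel) acc += std::pow(std::fabs(v), p);
  return static_cast<float>(std::pow(acc, 1.0 / p));
}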

/**
*@brief GlobalAveragePool consumes an input tensor X and applies average pooling across the values in the same channel.
This is equivalent to AveragePool with a kernel size equal to the spatial dimensions of the input tensor. \n
*@par Inputs:
*x: Input data tensor from the previous operator; dimensions for the image case are (N x C x H x W),
where N is the batch size, C is the number of channels, and H and W are the height and the width of the data.
For the non-image case, the dimensions are in the form (N x C x D1 x D2 ... Dn), where N is the batch size.
*@par Outputs:
*y: Output data tensor from pooling across the input tensor. The output tensor has the same rank as the input.
The first two dimensions of the output shape are the same as the input (N x C), while the other dimensions are all 1.
*@par Restrictions:
*Warning: This operator can be integrated only by configuring INSERT_OP_FILE of aclgrphBuildModel. Please do not use it directly.
*/
REG_OP(GlobalAveragePool)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(GlobalAveragePool)
} // namespace ge
#endif // OPS_BUILT_IN_OP_PROTO_INC_NN_POOLING_OPS_H
