You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

image_ops.h 58 kB

5 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
4 years ago
4 years ago
4 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file image_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
  24. /**
  25. *@brief Adjust the hue of one or more images . \n
  26. *@par Inputs:
  27. *Input images is a tensor of at least 3 dimensions. The last dimension is
  28. interpreted as channels, and must be three. Inputs include:
  29. *@li images:A Tensor of type float16 or float. Images to adjust. At least 3-D. The format
  30. must be NHWC.
  31. *@li delta:A Tensor of type float. A float delta to add to the hue . \n
  32. *@par Outputs:
  33. *y:A Tensor of type float. The format must be NHWC. \n
  34. *@attention Constraints:
  35. *Input images is a tensor of at least 3 dimensions. The last dimension is
  36. interpreted as channels, and must be three . \n
  37. *@par Third-party framework compatibility
  38. *Compatible with tensorflow AdjustHue operator.
  39. */
  40. REG_OP(AdjustHue)
  41. .INPUT(images, TensorType({DT_FLOAT16,DT_FLOAT}))
  42. .INPUT(delta, TensorType({DT_FLOAT}))
  43. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
  44. .OP_END_FACTORY_REG(AdjustHue)
  45. /**
  46. *@brief Adjust the saturation of one or more images . \n
  47. *@par Inputs:
  48. *Input images is a tensor of at least 3 dimensions. The last dimension is
  49. interpreted as channels, and must be three. Inputs include:
  50. *@li images:A Tensor of type float16 or float. Images to adjust. At least 3-D. The format
  51. must be NHWC.
  52. *@li scale:A Tensor of type float. A float scale to add to the saturation . \n
  53. *@par Outputs:
  54. *y:A Tensor of type float. The format must be NHWC. \n
  55. *@attention Constraints:
  56. *Input images is a tensor of at least 3 dimensions. The last dimension is
  57. interpreted as channels, and must be three . \n
  58. *@par Third-party framework compatibility
  59. *Compatible with tensorflow AdjustSaturation operator.
  60. */
  61. REG_OP(AdjustSaturation)
  62. .INPUT(images, TensorType({DT_FLOAT16,DT_FLOAT}))
  63. .INPUT(scale, TensorType({DT_FLOAT}))
  64. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
  65. .OP_END_FACTORY_REG(AdjustSaturation)
  66. /**
  67. *@brief Adjust the contrast of one or more images . \n
  68. *@par Inputs:
  69. *Input images is a tensor of at least 3 dimensions. The last 3 dimensions are
  70. interpreted as '[height, width, channels]'. Inputs include:
  71. *@li images:A Tensor of type float16 or float. Images to adjust. At least 3-D. The format
  72. must be NHWC.
  73. *@li contrast_factor:A Tensor of type float. A float multiplier for adjusting contrast . \n
  74. *@par Outputs:
  75. *y:A Tensor of type float. The format must be NHWC. \n
  76. *@attention Constraints:
  77. *Input images is a tensor of at least 3 dimensions. The last dimension is
  78. interpreted as channels, and must be three . \n
  79. *@par Third-party framework compatibility
  80. *Compatible with tensorflow AdjustContrast operator.
  81. */
  82. REG_OP(AdjustContrast)
  83. .INPUT(images, TensorType({DT_FLOAT16,DT_FLOAT}))
  84. .INPUT(contrast_factor, TensorType({DT_FLOAT}))
  85. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT}))
  86. .OP_END_FACTORY_REG(AdjustContrast)
  87. /**
  88. *@brief Extracts crops from the input image tensor and resizes them. Extracts
  89. crops from the input image tensor and resizes them using bilinear sampling or
  90. nearest neighbor sampling to a common output size specified by crop_size . \n
  91. *@par Inputs:
  92. *Input images must be a 4-D tensor. Inputs include:
  93. *@li x:A Tensor. Must be one of the following types:uint8, uint16, int8,
  94. int16, int32, int64, float16, float, double. A 4-D tensor of shape
  95. [batch, image_height, image_width, depth]. The format must be NHWC.
  96. *@li boxes: A Tensor of type float. A 2-D tensor of shape [num_boxes, 4].
  97. *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with
  98. int32 values in [0, batch).
  99. *@li crop_size: A Tensor of type int32. A 1-D tensor of 2 elements, crop_size
  100. = [crop_height, crop_width]. All cropped image patches are resized to this size . \n
  101. *@par Attributes:
  102. *@li extrapolation_value: An optional float. Defaults to 0. Value used for
  103. extrapolation, when applicable.
  104. *@li method: An optional string from: '"bilinear", "nearest"'. Defaults to
  105. "bilinear". Currently two sampling methods are supported: Bilinear and
  106. NearestNeighbor . \n
  107. *@par Outputs:
  108. *y:A Tensor of type float. The format must be NHWC. \n
  109. *@attention Constraints:
  110. *Input images must be a 4-D tensor . \n
  111. *@par Third-party framework compatibility
  112. *Compatible with tensorflow CropAndResize operator.
  113. */
  114. REG_OP(CropAndResize)
  115. .INPUT(x, TensorType({DT_UINT8, DT_UINT16, DT_INT8, \
  116. DT_INT16, DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  117. .INPUT(boxes, TensorType({DT_FLOAT}))
  118. .INPUT(box_index, TensorType({DT_INT32}))
  119. .INPUT(crop_size, TensorType({DT_INT32}))
  120. .OUTPUT(y, TensorType({DT_FLOAT}))
  121. .ATTR(extrapolation_value, Float, 0)
  122. .ATTR(method, String, "bilinear")
  123. .OP_END_FACTORY_REG(CropAndResize)
  124. /**
  125. *@brief Extracts crops from the input image tensor and resizes them.
  126. * Extracts crops from the input image tensor and resizes them using bilinear sampling or
  127. * nearest neighbor sampling to a common output size specified by crop_size . \n
  128. *@par Inputs:
  129. *Input images must be a 5HD tensor. Inputs include:
  130. *@li x:A Tensor. Must be one of the following types:float16, float. A 5HD tensor of shape
  131. * [batch, C1, image_height, image_width, C0].
  132. *@li boxes: A Tensor of type float. A 2-D tensor of shape [num_boxes, 4].
  133. *@li box_index: A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch) . \n
  134. *@par Attributes:
  135. *@li crop_size: A required list of ints: [crop_height, crop_width]. All cropped image patches are resized to this size.
  136. *@li extrapolation_value: An optional float. Defaults to 0. Value used for extrapolation, when applicable.
  137. *@li method: An optional string from: '"bilinear"'. Defaults to "bilinear" . \n
  138. *@par Outputs:
  139. *y:A Tensor of type float . \n
  140. *@attention Constraints:
  141. *Input images must be a 5HD tensor . \n
  142. *@par Third-party framework compatibility
  143. *Compatible with tensorflow CropAndResize operator.
  144. * @par Restrictions:
  145. * Warning: THIS FUNCTION IS DEPRECATED. Please use CropAndResize instead.
  146. */
  147. REG_OP(CropAndResizeD)
  148. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
  149. .INPUT(boxes, TensorType({DT_FLOAT}))
  150. .INPUT(box_index, TensorType({DT_INT32}))
  151. .OUTPUT(y, TensorType({DT_FLOAT}))
  152. .REQUIRED_ATTR(crop_size, ListInt)
  153. .ATTR(extrapolation_value, Float, 0)
  154. .ATTR(method, String, "bilinear")
  155. .OP_END_FACTORY_REG(CropAndResizeD)
  156. /**
  157. *@brief Computes the gradient of the crop_and_resize op with respect to the input
  158. boxes tensor . \n
  159. *@par Inputs:
  160. *Input images and grads must be a 4-D tensor. Inputs include:
  161. *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
  162. The format must be NHWC.
  163. *@li images: A 4-D tensor of shape [batch, image_height, image_width, depth].
  164. The format must be NHWC.
  165. Both image_height and image_width need to be positive.
  166. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor
  167. specifies the coordinates of a box in the box_ind[i] image and is specified in
  168. normalized coordinates [y1, x1, y2, x2].
  169. *@li box_index: A 1-D tensor of shape [num_boxes] with int32 values in
  170. [0, batch). The value of box_ind[i] specifies the image that the i-th box
  171. refers to . \n
  172. *@par Attributes:
  173. method: A string specifying the interpolation method. Only 'bilinear' is
  174. supported for now . \n
  175. *@par Outputs:
  176. *y:A 2-D tensor of shape [num_boxes, 4] . \n
  177. *@attention Constraints:
  178. *Input images and grads must be a 4-D tensor . \n
  179. *@par Third-party framework compatibility
  180. *Compatible with tensorflow CropAndResizeGradBoxes operator.
  181. */
  182. REG_OP(CropAndResizeGradBoxes)
  183. .INPUT(grads, TensorType({DT_FLOAT}))
  184. .INPUT(images, TensorType({DT_UINT8, DT_UINT16, DT_INT8, DT_INT16, \
  185. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  186. .INPUT(boxes, TensorType({DT_FLOAT}))
  187. .INPUT(box_index, TensorType({DT_INT32}))
  188. .OUTPUT(y, TensorType({DT_FLOAT}))
  189. .ATTR(method, String, "bilinear")
  190. .OP_END_FACTORY_REG(CropAndResizeGradBoxes)
  191. /**
  192. *@brief Computes the gradient of the crop_and_resize op with respect to the input
  193. images tensor . \n
  194. *@par Inputs:
  195. *Input grads must be a 4-D tensor. Inputs include:
  196. *@li grads: A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
  197. The format must be NHWC.
  198. *@li boxes: A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor
  199. specifies the coordinates of a box in the box_ind[i] image and is specified
  200. in normalized coordinates [y1, x1, y2, x2].
  201. *@li box_index: A 1-D tensor of shape [num_boxes] with int32 values in
  202. [0, batch). The value of box_ind[i] specifies the image that the i-th box
  203. refers to.
  204. *@li image_size: A 1-D tensor with value [batch, image_height, image_width,
  205. depth] containing the original image size. Both image_height and image_width
  206. need to be positive . \n
  207. *@par Attributes:
  208. method: A string specifying the interpolation method. Only 'bilinear' is
  209. supported for now . \n
  *T: A required attribute of type Type. NOTE(review): presumably the output element type — confirm against the op implementation . \n
  210. *@par Outputs:
  211. *y:A 4-D tensor of shape [batch, image_height, image_width, depth]. The format
  212. must be NHWC. \n
  213. *@attention Constraints:
  214. *Input grads must be a 4-D tensor . \n
  215. *@par Third-party framework compatibility
  216. *Compatible with tensorflow CropAndResizeGradImage operator.
  217. */
  218. REG_OP(CropAndResizeGradImage)
  219. .INPUT(grads, TensorType({DT_FLOAT}))
  220. .INPUT(boxes, TensorType({DT_FLOAT}))
  221. .INPUT(box_index, TensorType({DT_INT32}))
  222. .INPUT(image_size, TensorType({DT_INT32}))
  223. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  224. .ATTR(method, String, "bilinear")
  225. .REQUIRED_ATTR(T, Type)
  226. .OP_END_FACTORY_REG(CropAndResizeGradImage)
  227. /**
  228. *@brief Extracts a glimpse from the input tensor . \n
  229. *@par Inputs:
  230. *Input x must be a 4-D tensor. Inputs include:
  231. *@li x: A 4-D float tensor of shape [batch_size, height, width, channels].
  232. The format must be NHWC.
  233. *@li size: A 1-D tensor of 2 elements containing the size of the glimpses to
  234. extract. The glimpse height must be specified first, followed by the glimpse
  235. width.
  236. *@li offsets: A 2-D integer tensor of shape [batch_size, 2] containing the y,
  237. x locations of the center of each window . \n
  238. *@par Attributes:
  239. *@li centered: indicates if the offset coordinates are centered relative to
  240. the image, in which case the (0, 0) offset is relative to the center of the
  241. input images. If false, the (0,0) offset corresponds to the upper left corner
  242. of the input images.
  243. *@li normalized: indicates if the offset coordinates are normalized.
  244. *@li uniform_noise: indicates if the noise should be generated using a
  245. uniform distribution or a Gaussian distribution.
  246. *@li noise: indicates if the noise should be uniform, gaussian, or zero.
  247. The default is uniform which means the noise type will be decided by
  248. uniform_noise . \n
  249. *@par Outputs:
  250. *y:A tensor representing the glimpses [batch_size, glimpse_height,
  251. glimpse_width, channels]. The format must be NHWC. \n
  252. *@attention Constraints:
  253. *Input x must be a 4-D tensor . \n
  254. *@par Third-party framework compatibility
  255. *Compatible with tensorflow ExtractGlimpse operator.
  256. */
  257. REG_OP(ExtractGlimpse)
  258. .INPUT(x, TensorType({DT_FLOAT}))
  259. .INPUT(size, TensorType({DT_INT32}))
  260. .INPUT(offsets, TensorType({DT_FLOAT}))
  261. .OUTPUT(y, TensorType({DT_FLOAT}))
  262. .ATTR(centered, Bool, true)
  263. .ATTR(normalized, Bool, true)
  264. .ATTR(uniform_noise, Bool, true)
  265. .ATTR(noise, String, "uniform")
  266. .OP_END_FACTORY_REG(ExtractGlimpse)
  267. /**
  268. *@brief Convert one or more images from HSV to RGB . \n
  269. *@par Inputs:
  270. *Last dimension of input images must be size 3. Inputs include:
  271. *images: 1-D or higher rank. HSV data to convert. Last dimension must be size 3 . \n
  272. *@par Outputs:
  273. *y:images converted to RGB . \n
  274. *@attention Constraints:
  275. *Last dimension of input images must be size 3 . \n
  276. *@par Third-party framework compatibility
  277. *Compatible with tensorflow HSVToRGB operator.
  278. */
  279. REG_OP(HSVToRGB)
  280. .INPUT(images, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE}))
  281. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE}))
  282. .OP_END_FACTORY_REG(HSVToRGB)
  283. /**
  284. *@brief Resize quantized images to size using quantized bilinear interpolation . \n
  285. *@par Inputs:
  286. *Input images must be a 4-D tensor. Inputs include:
  287. *@li images: 4-D with shape [batch, height, width, channels]. The format must
  288. be NHWC.
  289. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
  290. size for the images.
  291. *@li min: A Tensor of type float.
  292. *@li max: A Tensor of type float . \n
  293. *@par Attributes:
  294. *@li align_corners: An optional bool. Defaults to False. If true, the centers
  295. of the 4 corner pixels of the input and output tensors are aligned, preserving
  296. the values at the corner pixels. Defaults to false.
  297. *@li half_pixel_centers: An optional bool. Defaults to False . \n
  298. *@par Outputs:
  299. *@li resized_images: 4-D with shape [batch, new_height, new_width, channels].
  300. The format must be NHWC.
  301. *@li y_min: A Tensor of type float.
  302. *@li y_max: A Tensor of type float . \n
  303. *@attention Constraints:
  304. *Input images and output images must be quantized types . \n
  305. *@par Third-party framework compatibility
  306. *Compatible with tensorflow QuantizedResizeBilinear operator.
  307. */
  308. REG_OP(QuantizedResizeBilinear)
  309. .INPUT(images, TensorType({DT_QUINT8,DT_QINT32,DT_FLOAT}))
  310. .INPUT(size, TensorType({ DT_INT32 }))
  311. .INPUT(min, TensorType({ DT_FLOAT }))
  312. .INPUT(max, TensorType({ DT_FLOAT }))
  313. .OUTPUT(resized_images, TensorType({DT_QUINT8,DT_QINT32,DT_FLOAT }))
  314. .OUTPUT(y_min, TensorType({ DT_FLOAT }))
  315. .OUTPUT(y_max, TensorType({ DT_FLOAT }))
  316. .ATTR(align_corners, Bool, false)
  317. .ATTR(half_pixel_centers, Bool, false)
  318. .OP_END_FACTORY_REG(QuantizedResizeBilinear)
  319. /**
  320. *@brief Resize images to size using area interpolation . \n
  321. *@par Inputs:
  322. *Input images must be a 4-D tensor. Inputs include:
  323. *@li images: 4-D with shape [batch, height, width, channels]. The format must
  324. be NHWC.
  325. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width.
  326. The new size for the images . \n
  327. *@par Attributes:
  328. *align_corners: An optional bool. If true, the centers of the 4 corner pixels of the input and
  329. output tensors are aligned, preserving the values at the corner pixels.
  330. Defaults to false . \n
  331. *@par Outputs:
  332. *y: 4-D with shape [batch, new_height, new_width, channels]. The format must
  333. be NHWC. \n
  334. *@attention Constraints:
  335. *Input images can be of different types but output images are always float . \n
  336. *@par Third-party framework compatibility
  337. *Compatible with tensorflow ResizeArea operator.
  338. */
  339. REG_OP(ResizeArea)
  340. .INPUT(images, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  341. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  342. .INPUT(size, TensorType({DT_INT32}))
  343. .OUTPUT(y, TensorType({DT_FLOAT}))
  344. .ATTR(align_corners, Bool, false)
  345. .OP_END_FACTORY_REG(ResizeArea)
  346. /**
  347. *@brief Computes the gradient of bicubic interpolation . \n
  348. *@par Inputs:
  349. *Input grads must be a 4-D tensor. Inputs include:
  350. *@li grads: A Tensor of type float. 4-D with shape [batch, height, width,
  351. channels]. The format must be NHWC.
  352. *@li original_image: A Tensor. Must be one of the following types: float,
  353. double. 4-D with shape [batch, orig_height, orig_width, channels], The image
  354. tensor that was resized. The format must be NHWC. \n
  355. *@par Attributes:
  356. *@li align_corners: An optional bool. Defaults to False. If true, the centers
  357. of the 4 corner pixels of the input and grad tensors are aligned. Defaults to
  358. false.
  359. *@li half_pixel_centers: An optional bool. Defaults to False . \n
  360. *@par Outputs:
  361. *y: A Tensor. Has the same type as original_image. The format must be NHWC. \n
  362. *@attention Constraints:
  363. *Output y has the same type as original_image (float or double) .
  364. *@par Third-party framework compatibility
  365. *Compatible with tensorflow ResizeBicubicGrad operator.
  366. */
  367. REG_OP(ResizeBicubicGrad)
  368. .INPUT(grads, TensorType({DT_FLOAT}))
  369. .INPUT(original_image, TensorType({DT_FLOAT, DT_DOUBLE}))
  370. .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
  371. .ATTR(align_corners, Bool, false)
  372. .ATTR(half_pixel_centers, Bool, false)
  373. .OP_END_FACTORY_REG(ResizeBicubicGrad)
  374. /**
  375. *@brief Resize images to size using bicubic interpolation . \n
  376. *@par Inputs:
  377. *Input images must be a 4-D tensor. Inputs include:
  378. *@li images: 4-D with shape [batch, height, width, channels]. The format
  379. must be NHWC.
  380. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
  381. size for the images . \n
  382. *@par Attributes:
  383. *@li align_corners: An optional bool. If true, the centers of the 4 corner pixels of the input
  384. and output tensors are aligned, preserving the values at the corner pixels.
  385. Defaults to false.
  386. *@li half_pixel_centers: An optional bool. Defaults to False . \n
  387. *@par Outputs:
  388. *y: 4-D with shape [batch, new_height, new_width, channels]. The format
  389. must be NHWC. \n
  390. *@attention Constraints:
  391. *Input images can be of different types but output images are always float .
  392. *@par Third-party framework compatibility
  393. *Compatible with tensorflow ResizeBicubic operator.
  394. */
  395. REG_OP(ResizeBicubic)
  396. .INPUT(images, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, \
  397. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  398. .INPUT(size, TensorType({DT_INT32}))
  399. .OUTPUT(y, TensorType({DT_FLOAT}))
  400. .ATTR(align_corners, Bool, false)
  401. .ATTR(half_pixel_centers, Bool, false)
  402. .OP_END_FACTORY_REG(ResizeBicubic)
  403. /**
  404. *@brief Computes the gradient of nearest neighbor interpolation . \n
  405. *@par Inputs:
  406. *Input grads must be a 4-D tensor. Inputs include:
  407. *@li grads: A Tensor. Must be one of the following types: int8, uint8, int16, uint16,
  408. int32, int64, float16, float, double. Must set the format, supported format list ["NCHW, NHWC"]
  409. *@li size: A 1-D int32 Tensor of 2 elements: orig_height, orig_width.
  410. The original input size . \n
  411. *@par Attributes:
  412. *@li align_corners: An optional bool. Defaults to False. If true, the centers
  413. of the 4 corner pixels of the input and grad tensors are aligned. Defaults to
  414. false.
  415. *@li half_pixel_centers: An optional bool. Defaults to False . \n
  416. *@par Outputs:
  417. *y: A Tensor. Has the same type as grads . \n
  418. *@attention Constraints:
  419. *Input grads must be a 4-D tensor . \n
  420. *@par Third-party framework compatibility
  421. *Compatible with tensorflow ResizeNearestNeighborV2Grad operator.
  422. */
  423. REG_OP(ResizeNearestNeighborV2Grad)
  424. .INPUT(grads, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
  425. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  426. .INPUT(size, TensorType({DT_INT32}))
  427. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
  428. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  429. .ATTR(align_corners, Bool, false)
  430. .ATTR(half_pixel_centers, Bool, false)
  431. .OP_END_FACTORY_REG(ResizeNearestNeighborV2Grad)
  432. /**
  433. *@brief Computes the gradient of nearest neighbor interpolation . \n
  434. *@par Inputs:
  435. *Input grads must be a 4-D tensor. Inputs include:
  436. *grads: A Tensor. 4-D with shape [batch, height, width, channels].
  437. *@par Attributes:
  438. *@li align_corners: An optional bool. Defaults to False. If true, the centers
  439. of the 4 corner pixels of the input and grad tensors are aligned. Defaults to
  440. false.
  441. *@li size: A required list of ints. Specifies the images size . \n
  442. *@par Outputs:
  443. *y: A Tensor. Has the same type as grads . \n
  444. *@par Third-party framework compatibility
  445. *Compatible with tensorflow ResizeNearestNeighborV2GradD operator.
  446. *
  447. * @par Restrictions:
  448. * Warning: THIS FUNCTION IS DEPRECATED. Please use ResizeNearestNeighborV2Grad instead.
  449. */
  450. REG_OP(ResizeNearestNeighborV2GradD)
  451. .INPUT(grads, TensorType({DT_FLOAT}))
  452. .OUTPUT(y, TensorType({DT_FLOAT}))
  453. .REQUIRED_ATTR(size, ListInt)
  454. .ATTR(align_corners, Bool, false)
  455. .ATTR(half_pixel_centers, Bool, false)
  456. .OP_END_FACTORY_REG(ResizeNearestNeighborV2GradD)
  457. /**
  458. *@brief Computes the gradient of bilinear interpolation . \n
  459. *@par Inputs:
  460. *Input grads must be a 4-D tensor. Inputs include:
  461. *@li grads: A Tensor of type float32. Must set the format, supported format list ["NCHW, NHWC"]
  462. *@li original_image: A Tensor. 4-D shape. Must set the format, supported format list ["NCHW, NHWC"].
  463. The image tensor that was resized . \n
  464. *@par Attributes:
  465. *align_corners: An optional bool. Defaults to False. If true, the centers of
  466. the 4 corner pixels of the input and grad tensors are aligned. Defaults to
  467. false . \n
  468. *@par Outputs:
  469. *y: A Tensor. Has the same type as original_image . \n
  470. *@attention Constraints:
  471. *Input grads must be a 4-D tensor . \n
  472. *@par Third-party framework compatibility
  473. *Compatible with tensorflow ResizeBilinearV2Grad operator.
  474. */
  475. REG_OP(ResizeBilinearV2Grad)
  476. .INPUT(grads, TensorType({DT_FLOAT}))
  477. .INPUT(original_image, TensorType::FloatingDataType())
  478. .OUTPUT(y, TensorType({DT_FLOAT}))
  479. .ATTR(align_corners, Bool, false)
  480. .ATTR(half_pixel_centers, Bool, false)
  481. .OP_END_FACTORY_REG(ResizeBilinearV2Grad)
  482. /**
  483. *@brief Resize images to size using bilinear interpolation . \n
  484. *@par Inputs:
  485. *Input images must be a 4-D tensor. Inputs include:
  486. *@li x: 4-D tensor. Must set the format, supported format list ["NCHW, NHWC"]
  487. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width. The new
  488. size for the images . \n
  489. *@par Attributes:
  490. *@li align_corners: An optional bool. If true, the centers of the 4 corner pixels of the input and
  491. output tensors are aligned, preserving the values at the corner pixels.
  492. Defaults to false . \n
  *@li half_pixel_centers: An optional bool. Defaults to False . \n
  493. *@par Outputs:
  494. *y: 4-D with shape [batch, new_height, new_width, channels] . \n
  495. *@attention Constraints:
  496. *Input images can be of different types but output images are always float . \n
  497. *@par Third-party framework compatibility
  498. *Compatible with tensorflow ResizeBilinearV2 operator.
  499. */
  500. REG_OP(ResizeBilinearV2)
  501. .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
  502. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  503. .INPUT(size, TensorType({DT_INT32}))
  504. .OUTPUT(y, TensorType({DT_FLOAT}))
  505. .ATTR(align_corners, Bool, false)
  506. .ATTR(half_pixel_centers, Bool, false)
  507. .OP_END_FACTORY_REG(ResizeBilinearV2)
  508. /**
  509. *@brief Converts one or more images from RGB to HSV . \n
  510. *@par Inputs:
  511. *Last dimension of input images must be size 3. Inputs include:
  512. *images: A Tensor. Must be one of the following types: float16, float, double. 1-D or
  513. higher rank. RGB data to convert. Last dimension must be size 3 . \n
  514. *@par Outputs:
  515. *y: A Tensor. Has the same type as images . \n
  516. *@attention Constraints:
  517. *Outputs a tensor of the same shape as the images tensor, containing the HSV
  518. value of the pixels. The output is only well defined if the value in images
  519. are in [0,1] . \n
  520. *@par Third-party framework compatibility
  521. *Compatible with tensorflow RGBToHSV operator.
  522. */
  523. REG_OP(RGBToHSV)
  524. .INPUT(images, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
  525. .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
  526. .OP_END_FACTORY_REG(RGBToHSV)
  527. /**
  528. *@brief Generate a single randomly distorted bounding box for an image . \n
  529. *@par Inputs:
  530. *Input images must be a 4-D tensor. Inputs include:
  531. *@li image_size: 1-D, containing [height, width, channels].
  532. *@li bounding_boxes: 3-D with shape [batch, N, 4] describing the N bounding
  533. boxes associated with the image. \n
  534. *@par Attributes:
  535. *@li seed: If either seed or seed2 are set to non-zero, the random number
  536. generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
  537. *@li seed2: A second seed to avoid seed collision.
  538. *@li min_object_covered: The cropped area of the image must contain at least
  539. this fraction of any bounding box supplied. The value of this parameter should
  540. be non-negative. In the case of 0, the cropped area does not need to overlap
  541. any of the bounding boxes supplied .
  542. *@li aspect_ratio_range: The cropped area of the image must have an aspect
  543. ratio = width / height within this range.
  544. *@li max_attempts: Number of attempts at generating a cropped region of the
  545. image of the specified constraints. After max_attempts failures, return the
  546. entire image.
  547. *@li use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes
  548. supplied. If true, assume an implicit bounding box covering the whole input.
  549. If false, raise an error . \n
  550. *@par Outputs:
  551. *@li begin: 1-D, containing [offset_height, offset_width, 0].
  552. *@li size: 1-D, containing [target_height, target_width, -1].
  553. *@li bboxes: 3-D with shape [1, 1, 4] containing the distorted bounding box . \n
  554. *@attention Constraints:
  555. *Input images can be of different types but output images are always float . \n
  556. *@par Third-party framework compatibility
  557. *Compatible with tensorflow SampleDistortedBoundingBox operator.
  558. */
  559. REG_OP(SampleDistortedBoundingBox)
  560. .INPUT(image_size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  561. DT_INT32, DT_INT64 }))
  562. .INPUT(bounding_boxes, TensorType({ DT_FLOAT }))
  563. .OUTPUT(begin, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  564. DT_INT32, DT_INT64 }))
  565. .OUTPUT(size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  566. DT_INT32, DT_INT64 }))
  567. .OUTPUT(bboxes, TensorType({ DT_FLOAT }))
  568. .ATTR(seed, Int, 0)
  569. .ATTR(seed2, Int, 0)
  570. .ATTR(min_object_covered, Float, 0.1f)
  571. .ATTR(aspect_ratio_range, ListFloat, { 0.75f, 1.33f })
  572. .ATTR(area_range, ListFloat, { 0.05f, 1.0f })
  573. .ATTR(max_attempts, Int, 100)
  574. .ATTR(use_image_if_no_bounding_boxes, Bool, false)
  575. .OP_END_FACTORY_REG(SampleDistortedBoundingBox)
  576. /**
  577. *@brief Generate a single randomly distorted bounding box for an image . \n
  578. *@par Inputs:
  579. *Input images must be a 4-D tensor. Inputs include:
  580. *@li image_size: 1-D, containing [height, width, channels].
  581. *@li bounding_boxes: 3-D with shape [batch, N, 4] describing the N bounding
  582. boxes associated with the image.
  583. *@li min_object_covered: The cropped area of the image must contain at least
  584. this fraction of any bounding box supplied. The value of this parameter should
  585. be non-negative. In the case of 0, the cropped area does not need to overlap
  586. any of the bounding boxes supplied . \n
  587. *@par Attributes:
  588. *@li seed: If either seed or seed2 are set to non-zero, the random number
  589. generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
  590. *@li seed2: A second seed to avoid seed collision.
  591. *@li aspect_ratio_range: The cropped area of the image must have an aspect
  592. ratio = width / height within this range.
  593. *@li max_attempts: Number of attempts at generating a cropped region of the
  594. image of the specified constraints. After max_attempts failures, return the
  595. entire image.
  596. *@li use_image_if_no_bounding_boxes: Controls behavior if no bounding boxes
  597. supplied. If true, assume an implicit bounding box covering the whole input.
  598. If false, raise an error . \n
  599. *@par Outputs:
  600. *@li begin: 1-D, containing [offset_height, offset_width, 0].
  601. *@li size: 1-D, containing [target_height, target_width, -1].
  602. *@li bboxes: 3-D with shape [1, 1, 4] containing the distorted bounding box . \n
  603. *@attention Constraints:
  604. *Input images can be of different types but output images are always float . \n
  605. *@par Third-party framework compatibility
  606. *Compatible with tensorflow SampleDistortedBoundingBoxExt2 operator.
  607. */
  608. REG_OP(SampleDistortedBoundingBoxExt2)
  609. .INPUT(image_size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  610. DT_INT32, DT_INT64 }))
  611. .INPUT(bounding_boxes, TensorType({ DT_FLOAT }))
  612. .INPUT(min_object_covered, TensorType({ DT_FLOAT }))
  613. .OUTPUT(begin, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  614. DT_INT32, DT_INT64 }))
  615. .OUTPUT(size, TensorType({ DT_UINT8, DT_INT8, DT_INT16, \
  616. DT_INT32, DT_INT64 }))
  617. .OUTPUT(bboxes, TensorType({ DT_FLOAT }))
  618. .ATTR(seed, Int, 0)
  619. .ATTR(seed2, Int, 0)
  620. .ATTR(aspect_ratio_range, ListFloat, { 0.75f, 1.33f })
  621. .ATTR(area_range, ListFloat, { 0.05f, 1.0f })
  622. .ATTR(max_attempts, Int, 100)
  623. .ATTR(use_image_if_no_bounding_boxes, Bool, false)
  624. .OP_END_FACTORY_REG(SampleDistortedBoundingBoxExt2)
  625. /**
  626. *@brief Resize images to size using nearest neighbor interpolation . \n
  627. *@par Inputs:
  628. *Input x must be a 4-D tensor. Inputs include:
  629. *@li x: 4-D tensor. Must set the format, supported format list ["NCHW, NHWC"].
  630. *@li size: A 1-D int32 Tensor of 2 elements: new_height, new_width.
  631. The new size for the images . \n
  632. *@par Attributes:
  633. *align_corners: If true, the centers of the 4 corner pixels of the input and
  634. output tensors are aligned, preserving the values at the corner pixels.
  635. Defaults to false . \n
  636. *@par Outputs:
  637. *y: 4-D with shape [batch, new_height, new_width, channels] . \n
  638. *@par Third-party framework compatibility
  639. *Compatible with tensorflow ResizeNearestNeighborV2 operator.
  640. */
  641. REG_OP(ResizeNearestNeighborV2)
  642. .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
  643. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  644. .INPUT(size, TensorType({DT_INT32}))
  645. .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
  646. DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  647. .ATTR(align_corners, Bool, false)
  648. .ATTR(half_pixel_centers, Bool, false)
  649. .OP_END_FACTORY_REG(ResizeNearestNeighborV2)
  650. /**
  651. *@brief Draw bounding boxes on a batch of images . \n
  652. *@par Inputs:
  653. *Input images must be a 4-D tensor. Inputs include:
  654. *@li images: A Tensor. Must be one of the following types: float. 4-D with
  655. shape [batch, height, width, depth]. A batch of images. The format must be NHWC.
  656. *@li boxes: A Tensor of type float32. 3-D with shape [batch,
  657. num_bounding_boxes, 4] containing bounding boxes . \n
  658. *@par Outputs:
  659. *A Tensor. Has the same type as images. The format must be NHWC. \n
  660. *@attention Constraints:
  661. *Input images must be a 4-D tensor . \n
  662. *@par Third-party framework compatibility
  663. *Compatible with tensorflow DrawBoundingBoxes operator.
  664. */
  665. REG_OP(DrawBoundingBoxes)
  666. .INPUT(images, TensorType({DT_FLOAT}))
  667. .INPUT(boxes, TensorType({DT_FLOAT}))
  668. .OUTPUT(y, TensorType({DT_FLOAT}))
  669. .OP_END_FACTORY_REG(DrawBoundingBoxes)
  670. /**
  671. *@brief Greedily selects a subset of bounding boxes in descending order of
  672. score . \n
  673. *@par Inputs:
  674. *Input boxes and scores must be float type. Inputs include:
  675. *@li boxes: A 2-D float tensor of shape [num_boxes, 4].
  676. *@li scores: A 1-D float tensor of shape [num_boxes] representing a single
  677. score corresponding to each box (each row of boxes).
  678. *@li max_output_size: A scalar integer tensor representing the maximum number
  679. of boxes to be selected by non max suppression . \n
  680. *@par Attributes:
  681. *iou_threshold: A float representing the threshold for deciding whether boxes
  682. overlap too much with respect to IOU . \n
  683. *@par Outputs:
  684. *selected_indices: A 1-D integer tensor of shape [M] representing the selected
  685. indices from the boxes tensor, where M <= max_output_size . \n
  686. *@attention Constraints:
  687. *Input boxes and scores must be float type . \n
  688. *@par Third-party framework compatibility
  689. *Compatible with tensorflow NonMaxSuppression operator.
  690. */
  691. REG_OP(NonMaxSuppression)
  692. .INPUT(boxes, TensorType({DT_FLOAT}))
  693. .INPUT(scores, TensorType({DT_FLOAT}))
  694. .INPUT(max_output_size, TensorType({DT_INT32}))
  695. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  696. .ATTR(iou_threshold, Float, 0.5f)
  697. .OP_END_FACTORY_REG(NonMaxSuppression)
  698. /**
  699. *@brief Greedily selects a subset of bounding boxes in descending order of
  700. score . \n
  701. *@par Inputs:
  702. *Input boxes and scores must be float type. Inputs include:
  703. *@li boxes: A 2-D float tensor of shape [num_boxes, 4].
  704. *@li scores: A 1-D float tensor of shape [num_boxes] representing a single
  705. score corresponding to each box (each row of boxes).
  706. *@li max_output_size: A scalar integer tensor representing the maximum number
  707. of boxes to be selected by non max suppression.
  708. *@li iou_threshold: A 0-D float tensor representing the threshold for deciding
  709. whether boxes overlap too much with respect to IOU . \n
  710. *@par Outputs:
  711. *selected_indices: A 1-D integer tensor of shape [M] representing the selected
  712. indices from the boxes tensor, where M <= max_output_size . \n
  713. *@attention Constraints:
  714. *Input boxes and scores must be float type . \n
  715. *@par Third-party framework compatibility
  716. *Compatible with tensorflow NonMaxSuppressionV2 operator.
  717. */
  718. REG_OP(NonMaxSuppressionV2)
  719. .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
  720. .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  721. .INPUT(max_output_size, TensorType({DT_INT32}))
  722. .INPUT(iou_threshold, TensorType({DT_FLOAT16,DT_FLOAT}))
  723. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  724. .OP_END_FACTORY_REG(NonMaxSuppressionV2)
  725. /**
  726. *@brief Greedily selects a subset of bounding boxes in descending order of
  727. score . \n
  728. *@par Inputs:
  729. *Input boxes and scores must be float type. Inputs include:
  730. *@li boxes: A 2-D float tensor of shape [num_boxes, 4].
  731. *@li scores: A 1-D float tensor of shape [num_boxes] representing a single
  732. score corresponding to each box (each row of boxes).
  733. *@li max_output_size: A scalar integer tensor representing the maximum number
  734. of boxes to be selected by non max suppression.
  735. *@li iou_threshold: A 0-D float tensor representing the threshold for deciding
  736. whether boxes overlap too much with respect to IOU.
  737. *@li score_threshold: A 0-D float tensor representing the threshold for
  738. deciding when to remove boxes based on score . \n
  739. *@par Outputs:
  740. *selected_indices: A 1-D integer tensor of shape [M] representing the selected
  741. indices from the boxes tensor, where M <= max_output_size . \n
  742. *@attention Constraints:
  743. *Input boxes and scores must be float type . \n
  744. *@par Third-party framework compatibility
  745. *Compatible with tensorflow NonMaxSuppressionV3 operator.
  746. */
  747. REG_OP(NonMaxSuppressionV3)
  748. .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
  749. .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  750. .INPUT(max_output_size, TensorType({DT_INT32}))
  751. .INPUT(iou_threshold, TensorType({DT_FLOAT16,DT_FLOAT}))
  752. .INPUT(score_threshold, TensorType({DT_FLOAT16,DT_FLOAT}))
  753. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  754. .OP_END_FACTORY_REG(NonMaxSuppressionV3)
  755. /**
  756. *@brief Greedily selects a subset of bounding boxes in descending order of
  757. score . \n
  758. *@par Inputs:
  759. *Input boxes and scores must be float type. Inputs include:
  760. *@li boxes: A 2-D float tensor of shape [num_boxes, 4].
  761. *@li scores: A 1-D float tensor of shape [num_boxes] representing a single
  762. score corresponding to each box (each row of boxes).
  763. *@li max_output_size: A scalar integer tensor representing the maximum number
  764. of boxes to be selected by non max suppression.
  765. *@li iou_threshold: A 0-D float tensor representing the threshold for deciding
  766. whether boxes overlap too much with respect to IOU.
  767. *@li score_threshold: A 0-D float tensor representing the threshold for
  768. deciding when to remove boxes based on score . \n
  769. *@par Attributes:
  770. *pad_to_max_output_size: If true, the output selected_indices is padded
  771. to be of length max_output_size. Defaults to false . \n
  772. *@par Outputs:
  773. *@li selected_indices: A 1-D integer tensor of shape [M] representing the
  774. selected indices from the boxes tensor, where M <= max_output_size.
  775. *@li valid_outputs: A 0-D integer tensor representing the number of valid
  776. elements in selected_indices, with the valid elements appearing first . \n
  777. *@attention Constraints:
  778. *Input boxes and scores must be float type . \n
  779. *@par Third-party framework compatibility
  780. *Compatible with tensorflow NonMaxSuppressionV4 operator.
  781. */
  782. REG_OP(NonMaxSuppressionV4)
  783. .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
  784. .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  785. .INPUT(max_output_size, TensorType({DT_INT32}))
  786. .INPUT(iou_threshold, TensorType({DT_FLOAT16,DT_FLOAT}))
  787. .INPUT(score_threshold, TensorType({DT_FLOAT16,DT_FLOAT}))
  788. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  789. .OUTPUT(valid_outputs, TensorType({DT_INT32}))
  790. .ATTR(pad_to_max_output_size, Bool, false)
  791. .OP_END_FACTORY_REG(NonMaxSuppressionV4)
  792. /**
  793. *@brief Greedily selects a subset of bounding boxes in descending order of
  794. score . \n
  795. *@par Inputs:
  796. *Input overlaps and scores must be float type. Inputs include:
  797. *@li overlaps: A 2-D float tensor of shape [num_boxes, num_boxes]
  798. representing the n-by-n box overlap values.
  799. *@li scores: A 1-D float tensor of shape [num_boxes] representing a single
  800. score corresponding to each box (each row of boxes).
  801. *@li max_output_size: A scalar integer tensor representing the maximum number
  802. of boxes to be selected by non max suppression.
  803. *@li overlap_threshold: A 0-D float tensor representing the threshold for
  804. deciding whether boxes overlap too.
  805. *@li score_threshold: A 0-D float tensor representing the threshold for
  806. deciding when to remove boxes based on score . \n
  807. *@par Attributes:
  808. *pad_to_max_output_size: If true, the output selected_indices is padded
  809. to be of length max_output_size. Defaults to false . \n
  810. *@par Outputs:
  811. *selected_indices: A 1-D integer tensor of shape [M] representing the
  812. selected indices from the boxes tensor, where M <= max_output_size . \n
  813. *@par Third-party framework compatibility
  814. *Compatible with tensorflow NonMaxSuppressionWithOverlaps operator.
  815. */
  816. REG_OP(NonMaxSuppressionWithOverlaps)
  817. .INPUT(overlaps, TensorType({DT_FLOAT}))
  818. .INPUT(scores, TensorType({DT_FLOAT}))
  819. .INPUT(max_output_size, TensorType({DT_INT32}))
  820. .INPUT(overlap_threshold, TensorType({DT_FLOAT}))
  821. .INPUT(score_threshold, TensorType({DT_FLOAT}))
  822. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  823. .OP_END_FACTORY_REG(NonMaxSuppressionWithOverlaps)
  824. /**
  825. *@brief JPEG-encode an image . \n
  826. *@par Inputs:
  827. *Input image must be unit8 type. Inputs include:
  828. *image: A 3-D uint8 Tensor of shape [height, width, channels] . \n
  829. *@par Attributes:
  830. *@li format: Per pixel image format.
  831. *@li quality: Quality of the compression from 0 to 100 (higher is better
  832. and slower).
  833. *@li progressive: If True, create a JPEG that loads progressively (coarse
  834. to fine).
  835. *@li optimize_size: If True, spend CPU/RAM to reduce size with no quality
  836. change.
  837. *@li chroma_downsampling: A boolean, default is true.
  838. *@li density_unit: Unit used to specify x_density and y_density: pixels per
  839. inch ('in') or centimeter ('cm').
  840. *@li x_density: Horizontal pixels per density unit.
  841. *@li y_density: Vertical pixels per density unit.
  842. *@li xmp_metadata: If not empty, embed this XMP metadata in the image header . \n
  843. *@par Outputs:
  844. *contents: 0-D. JPEG-encoded image . \n
  845. *@par Third-party framework compatibility
  846. *Compatible with tensorflow EncodeJpeg operator.
  847. */
  848. REG_OP(EncodeJpeg)
  849. .INPUT(image, TensorType({DT_UINT8}))
  850. .OUTPUT(contents, TensorType({DT_STRING}))
  851. .ATTR(format, String, "")
  852. .ATTR(quality, Int, 95)
  853. .ATTR(progressive, Bool, false)
  854. .ATTR(optimize_size, Bool, false)
  855. .ATTR(chroma_downsampling, Bool, true)
  856. .ATTR(density_unit, String, "in")
  857. .ATTR(x_density, Int, 300)
  858. .ATTR(y_density, Int, 300)
  859. .ATTR(xmp_metadata, String, "")
  860. .OP_END_FACTORY_REG(EncodeJpeg)
  861. /**
  862. *@brief PNG-encode an image.
  863. *@par Inputs:
  864. *Input image must be unit8 or uint16 type. Inputs include:
  865. *image: is a 3-D uint8 or uint16 Tensor of shape [height, width, channels]
  866. where channels is: 1: for grayscale; 2: for grayscale + alpha; 3: for RGB;
  867. 4: for RGBA . \n
  868. *@par Attributes:
  869. *compression: Compression level . \n
  870. *@par Outputs:
  871. *contents: 0-D. PNG-encoded image . \n
  872. *@par Third-party framework compatibility
  873. *Compatible with tensorflow EncodePng operator.
  874. */
  875. REG_OP(EncodePng)
  876. .INPUT(image, TensorType({DT_UINT8, DT_UINT16}))
  877. .OUTPUT(contents, TensorType({DT_STRING}))
  878. .ATTR(compression, Int, -1)
  879. .OP_END_FACTORY_REG(EncodePng)
  880. /**
  881. *@brief Resizes "images" to "size" using bilinear interpolation . \n
  882. *@par Inputs:
  883. * One input:
  884. *x: An NC1HWC0 Tensor.
  885. * Must be one of the following types: float16, float32 . \n
  886. *@par Attributes:
  887. *@li size: A required int32 Tensor specifying the new size for the images.
  888. No default value.
  889. *@li align_corners: An optional bool. If "true", the centers of the corner
  890. pixels of the input and output tensors are aligned. Defaults to "false" . \n
  891. *@par Outputs:
  892. *y: A Tensor with type float32 and the same format as input "images" . \n
  893. *@attention Constraints:
  894. *@li The input "size" must be a tensor of 2 elements: size[0] <= 2048,
  895. size[1] <= 2048.
  896. *@li The input "images" must be a tensor of 5 elements: images[2] <= 2048,
  897. images[3] <= 2048 . \n
  898. *@par Third-party framework compatibility
  899. * Compatible with TensorFlow operator ResizeBilinearV2D.
  900. *
  901. * @par Restrictions:
  902. * Warning: THIS FUNCTION IS DEPRECATED. Please use ResizeBilinearV2 instead.
  903. */
  904. REG_OP(ResizeBilinearV2D)
  905. .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
  906. .OUTPUT(y, TensorType({DT_FLOAT}))
  907. .ATTR(align_corners, Bool, false)
  908. .ATTR(half_pixel_centers, Bool, false)
  909. .REQUIRED_ATTR(size, ListInt)
  910. .OP_END_FACTORY_REG(ResizeBilinearV2D)
  911. /**
  912. *@brief Resizes "images" to "size" using bilinear interpolation and keep ratio at the time. \n
  913. *@par Inputs:
  914. * One input:
  915. *images: An NC1HWC0 Tensor.
  916. * Must be one of the following types: float16, float32 . \n
  917. *@par Attributes:
  918. *@li min_dimension: A required int32 attribute for the min dimension for the images.
  919. * No default value.
  920. *@li max_dimension: A required int32 attribute for the max dimension for the images.
  921. * No default value.
  922. *@li align_corners: An optional bool. If "true", the centers of the corner
  923. * pixels of the input and output tensors are aligned. Defaults to "false".
  924. *@li half_pixel_centers: indicates if the offset coordinates are normalized
  925. * Defaults to "false" . \n
  926. *@par Outputs:
  927. *y: A Tensor with type float32 and the same format as input "images" . \n
  928. *@attention Constraints:
  929. * The input "images" must be a tensor of 5 elements: images[2] <= 2048,
  930. images[3] <= 2048.
  931. */
  932. REG_OP(KeepRatioResizeBilinear)
  933. .INPUT(images, TensorType({DT_FLOAT16, DT_FLOAT}))
  934. .OUTPUT(y, TensorType({DT_FLOAT}))
  935. .REQUIRED_ATTR(min_dimension, Int)
  936. .REQUIRED_ATTR(max_dimension, Int)
  937. .ATTR(align_corners, Bool, false)
  938. .ATTR(half_pixel_centers, Bool, false)
  939. .OP_END_FACTORY_REG(KeepRatioResizeBilinear)
  940. /**
  941. *@brief Resizes "images" to "size" using nearest neighbor interpolation . \n
  942. *@par Inputs:
  943. * One input:
  944. *x: An NC1HWC0 Tensor.
  945. * Must be one of the following types: float16, float32, int32, int8, uint8
  946. *@par Attributes:
  947. *@li size: A required int32 Tensor specifying the new size for the images.
  948. No default value.
  949. *@li align_corners: An optional bool. If "true", the centers of the corner
  950. pixels of the input and output tensors are aligned. Defaults to "false" . \n
  951. *@par Outputs:
  952. *y: A Tensor with the same type and format as input "images" . \n
  953. *@attention Constraints:
  954. * The input "size" must be a tensor of 2 elements: size[0] <= 7680,
  955. size[1] <= 4320
  956. *@par Third-party framework compatibility
  957. * Compatible with TensorFlow operator ResizeNearestNeighborV2.
  958. *
  959. * @par Restrictions:
  960. * Warning: THIS FUNCTION IS DEPRECATED. Please use ResizeNearestNeighborV2 instead.
  961. */
  962. REG_OP(ResizeNearestNeighborV2D)
  963. .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  964. .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_INT32,DT_INT8,DT_UINT8}))
  965. .REQUIRED_ATTR(size, ListInt)
  966. .ATTR(align_corners, Bool, false)
  967. .ATTR(half_pixel_centers, Bool, false)
  968. .OP_END_FACTORY_REG(ResizeNearestNeighborV2D)
  969. /**
  970. *@brief Extract the shape information of a JPEG-encoded image . \n
  971. *@par Inputs:
  972. *Input contents must be 0-D. Inputs include:
  973. *contents: 0-D. The JPEG-encoded image . \n
  974. *@par Attributes:
  975. *output_type: The output type of the operation (int32 or int64). Defaults
  976. to int32 . \n
  977. *@par Outputs:
  978. *image_shape: 1-D. The image shape with format [height, width, channels] . \n
  979. *@par Third-party framework compatibility
  980. *Compatible with tensorflow ExtractJpegShape operator.
  981. */
  982. REG_OP(ExtractJpegShape)
  983. .INPUT(contents, TensorType({DT_STRING}))
  984. .OUTPUT(image_shape, TensorType({DT_INT32, DT_INT64}))
  985. .REQUIRED_ATTR(output_type, Type)
  986. .OP_END_FACTORY_REG(ExtractJpegShape)
  987. /**
  988. *@brief Draw bounding boxes on a batch of images . \n
  989. *@par Inputs:
  990. *@li images: 4-D with shape `[batch, height, width, depth]`.
  991. A batch of images.
  992. *@li boxes: 3-D with shape `[batch, num_bounding_boxes, 4]`
  993. containing bounding boxes.
  994. *@li colors: 2-D. A list of RGBA colors to cycle through for the boxes . \n
  995. *@par Outputs:
  996. *y: Returns 4-D with the same shape as `images`.
  997. The batch of input images with bounding boxes drawn on the images . \n
  998. *@par Third-party framework compatibility
  999. * Compatible with tensorflow DrawBoundingBoxesV2 operator.
  1000. */
  1001. REG_OP(DrawBoundingBoxesV2)
  1002. .INPUT(images, TensorType({DT_FLOAT}))
  1003. .INPUT(boxes, TensorType({DT_FLOAT}))
  1004. .INPUT(colors, TensorType({DT_FLOAT}))
  1005. .OUTPUT(y, TensorType({DT_FLOAT}))
  1006. .OP_END_FACTORY_REG(DrawBoundingBoxesV2)
  1007. /**
  1008. *@brief Greedily selects a subset of bounding boxes in descending order of score,
  1009. pruning away boxes that have high intersection-over-union (IOU) overlap
  1010. with previously selected boxes . \n
  1011. *@par Inputs:
  1012. *@li boxes: A 2-D float tensor of shape `[num_boxes, 4]`.
  1013. *@li scores: A 1-D float tensor of shape `[num_boxes]` representing a single
  1014. score corresponding to each box (each row of boxes).
  1015. *@li max_output_size: A scalar integer tensor representing the maximum number of
  1016. boxes to be selected by non max suppression.
  1017. *@li iou_threshold: A 0-D float tensor representing the threshold for deciding whether
  1018. boxes overlap too much with respect to IOU.
  1019. *@li score_threshold: A 0-D float tensor representing the threshold for deciding when to
  1020. remove boxes based on score.
  1021. *@li soft_nms_sigma: A 0-D float tensor representing the sigma parameter for Soft NMS . \n
  1022. *@par Attributes:
  1023. pad_to_max_output_size: If true, the output `selected_indices` is padded to be of length
  1024. `max_output_size`. Defaults to false. If not specified, defaults to false . \n
  1025. *@par Outputs:
  1026. *@li selected_indices: A 1-D integer tensor of shape [M] representing the
  1027. selected indices from the boxes tensor, where M <= max_output_size.
  1028. *@li selected_scores: A 1-D float tensor of shape `[M]` representing the corresponding
  1029. scores for each selected box, where `M <= max_output_size`.
  1030. *@li valid_outputs: A 0-D integer tensor representing the number of valid
  1031. elements in selected_indices, with the valid elements appearing first . \n
  1032. *@par Third-party framework compatibility
  1033. * Compatible with tensorflow NonMaxSuppressionV5 operator.
  1034. */
  1035. REG_OP(NonMaxSuppressionV5)
  1036. .INPUT(boxes, TensorType({DT_FLOAT16, DT_FLOAT}))
  1037. .INPUT(scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  1038. .INPUT(max_output_size, TensorType({DT_INT32}))
  1039. .INPUT(iou_threshold, TensorType({DT_FLOAT16, DT_FLOAT}))
  1040. .INPUT(score_threshold, TensorType({DT_FLOAT16, DT_FLOAT}))
  1041. .INPUT(soft_nms_sigma, TensorType({DT_FLOAT16, DT_FLOAT}))
  1042. .OUTPUT(selected_indices, TensorType({DT_INT32}))
  1043. .OUTPUT(selected_scores, TensorType({DT_FLOAT16, DT_FLOAT}))
  1044. .OUTPUT(valid_outputs, TensorType({DT_INT32}))
  1045. .ATTR(pad_to_max_output_size, Bool, false)
  1046. .REQUIRED_ATTR(T, Type)
  1047. .OP_END_FACTORY_REG(NonMaxSuppressionV5)
  1048. /**
  1049. *@brief Resizes "images" to "size" by scale and translate . \n
  1050. *@par Inputs:
  1051. *@li images: A `Tensor`. Must be one of the following types: `int8`, `uint8`,
  1052. `int16`, `uint16`, `int32`, `int64`, `bfloat16`, `float32`, `float64`.
  1053. *@li size: A `Tensor` of type `int32`.
  1054. *@li scale: A `Tensor` of type `float32`.
  1055. *@li translation: A `Tensor` of type `float32` . \n
  1056. *@li kernel_type: type is string, default lanczos3
  1057. *@li antialias: type is bool, default true \n
  1058. *@par Outputs:
  1059. *y: A Tensor with type float32 . \n
  1060. *@par Third-party framework compatibility
  1061. * Compatible with TensorFlow ScaleAndTranslate operator.
  1062. */
  1063. REG_OP(ScaleAndTranslate)
  1064. .INPUT(images, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16,
  1065. DT_INT32, DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
  1066. .INPUT(size, TensorType({DT_INT32}))
  1067. .INPUT(scale, TensorType({DT_FLOAT}))
  1068. .INPUT(translation, TensorType({DT_FLOAT}))
  1069. .OUTPUT(y, TensorType({DT_FLOAT}))
  1070. .ATTR(kernel_type, String, "lanczos3")
  1071. .ATTR(antialias, Bool, true)
  1072. .OP_END_FACTORY_REG(ScaleAndTranslate)
  1073. /**
  1074. *@brief Computes the gradient by scale and translate . \n
  1075. *@par Inputs:
  1076. *@li grads: A `Tensor`. Must be one of the following types: `float32`.
  1077. *@li original_image: A `Tensor`. Must have the same type as `grads`.
  1078. *@li scale: A `Tensor` of type `float32`.
  1079. *@li translation: A `Tensor` of type `float32` . \n
  1080. *@li kernel_type: type is string, default lanczos3
  1081. *@li antialias: type is bool, default true
  1082. *@par Outputs:
  1083. *y: A `Tensor`. Has the same type as `grads` . \n
  1084. *@par Third-party framework compatibility
  1085. * Compatible with TensorFlow ScaleAndTranslateGrad operator.
  1086. */
  1087. REG_OP(ScaleAndTranslateGrad)
  1088. .INPUT(grads, TensorType({DT_FLOAT}))
  1089. .INPUT(original_image, TensorType({DT_FLOAT}))
  1090. .INPUT(scale, TensorType({DT_FLOAT}))
  1091. .INPUT(translation, TensorType({DT_FLOAT}))
  1092. .OUTPUT(y, TensorType({DT_FLOAT}))
  1093. .ATTR(kernel_type, String, "lanczos3")
  1094. .ATTR(antialias, Bool, true)
  1095. .OP_END_FACTORY_REG(ScaleAndTranslateGrad)
/**
*@brief Greedily selects a subset of bounding boxes in descending order of score.
This operation performs non_max_suppression on the inputs per batch, across all classes . \n

*@par Inputs:
*@li boxes: A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 then
same boxes are used for all classes otherwise, if `q` is equal to number of
classes, class-specific boxes are used.
*@li scores: A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]`
representing a single score corresponding to each box (each row of boxes).
*@li max_output_size_per_class: A scalar integer tensor representing the maximum number of
boxes to be selected by non max suppression per class.
*@li max_total_size: A scalar representing the maximum number of boxes retained over all classes.
*@li iou_threshold: A 0-D float tensor representing the threshold for deciding whether
boxes overlap too much with respect to IOU.
*@li score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
boxes based on score . \n

*@par Attributes:
*@li pad_per_class: If false, the output nmsed boxes, scores and classes
are padded/clipped to `max_total_size`. If true, the
output nmsed boxes, scores and classes are padded to be of length
`max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
which case it is clipped to `max_total_size`. Defaults to false.
*@li clip_boxes: If true, assume the box coordinates are between [0, 1] and clip the output boxes
if they fall beyond [0, 1]. If false, do not do clipping and output the box
coordinates as it is. If not specified, defaults to true . \n

*@par Outputs:
*@li nmsed_boxes: A float tensor holding the coordinates of the selected boxes.
*@li nmsed_scores: A float tensor holding the score of each selected box.
*@li nmsed_classes: A float tensor holding the class of each selected box.
*@li valid_detections: An int32 tensor holding the number of valid detections per batch entry . \n

*@par Third-party framework compatibility
* Compatible with tensorflow CombinedNonMaxSuppression operator.
*/
REG_OP(CombinedNonMaxSuppression)
    .INPUT(boxes, TensorType({DT_FLOAT}))
    .INPUT(scores, TensorType({DT_FLOAT}))
    .INPUT(max_output_size_per_class, TensorType({DT_INT32}))
    .INPUT(max_total_size, TensorType({DT_INT32}))
    .INPUT(iou_threshold, TensorType({DT_FLOAT}))
    .INPUT(score_threshold, TensorType({DT_FLOAT}))
    .OUTPUT(nmsed_boxes, TensorType({DT_FLOAT}))
    .OUTPUT(nmsed_scores, TensorType({DT_FLOAT}))
    .OUTPUT(nmsed_classes, TensorType({DT_FLOAT}))
    .OUTPUT(valid_detections, TensorType({DT_INT32}))
    .ATTR(pad_per_class, Bool, false)
    .ATTR(clip_boxes, Bool, true)
    .OP_END_FACTORY_REG(CombinedNonMaxSuppression)
/**
*@brief Function spatial transformer . \n

*@par Inputs:
*@li x: A Tensor dtype of float16, float32.
*@li theta: An optional Tensor dtype of float16, float32, auxiliary coefficients . \n

*@par Attributes:
*@li output_size: A tuple output size. Defaults to {-1, -1}.
*@li default_theta: A tuple default theta. Defaults to {}.
*@li use_default_theta: List use default theta. Defaults to {}.
*@li align_corners: Align corners. Defaults to false . \n

*@par Outputs:
*y: A Tensor dtype of float16, float32, should be same shape and type as x.
*/
REG_OP(SpatialTransformerD)
    .INPUT(x, TensorType({DT_FLOAT,DT_FLOAT16}))
    .OPTIONAL_INPUT(theta, TensorType({DT_FLOAT,DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT,DT_FLOAT16}))
    .ATTR(output_size, ListInt, {-1, -1})
    .ATTR(default_theta, ListFloat, {})
    .ATTR(align_corners, Bool, false)
    .ATTR(use_default_theta, ListBool, {})
    .OP_END_FACTORY_REG(SpatialTransformerD)
/**
* @brief Resize the input tensor. \n
currently, only support resize image tensor using nearest neighbor and linear interpolation.

* @par Inputs:
* Input x must be a 4-D tensor. Inputs include: \n
* @li x: A Tensor. Must be one of the following types: uint8, int8, int16, \n
int32, int64, float16, float, double. 4-D with shape [batch, height, width, channels] \n
or shape [batch, channels, height, width].
* @li roi: A 1-D float Tensor. only takes effect when attr coordinate_transformation_mode \n
is "tf_crop_and_resize"
* @li scales: A 1-D float Tensor, the scale array along each dimension. Only one of \n
'scales' and 'sizes' can be specified.
* @li sizes: A 1-D int64 Tensor, The size of the output tensor. Only one of \n
'scales' and 'sizes' can be specified. If 'sizes' is specified, then set scales \n
to empty data (zero shape) in this operator's input list.

* @par Attributes:
* @li coordinate_transformation_mode: String. Defaults to "half_pixel". how to transform \n
the coordinate in the resized tensor to the coordinate in the original tensor. \n
other optional: pytorch_half_pixel, align_corners, asymmetric, tf_half_pixel_for_nn, \n
tf_crop_and_resize.
* @li cubic_coeff_a: Float. Defaults to -0.75, only used in cubic interpolation. \n
other optional: -0.5
* @li exclude_outside: Int. Defaults to 0, If set to 1, the weight of sampling \n
locations outside the tensor will be set to 0 and the weight will be renormalized \n
so that their sum is 1.0.
* @li extrapolation_value: Float. Defaults to 0.0f. When coordinate_transformation_mode \n
is "tf_crop_and_resize" and x_original is outside the range [0, length_original - 1], \n
this value is used as the corresponding output value.
* @li mode: String. Defaults to "nearest". Three interpolation modes: nearest (default), \n
linear and cubic.
* @li nearest_mode: String. Defaults to "round_prefer_floor". Four modes: round_prefer_floor, \n
round_prefer_ceil, floor, ceil. Only used by nearest interpolation.

* @par Outputs:
* y: A Tensor. Has the same type as x.

* @attention Constraints: \n
* Input x must be a 4-D tensor.

* @par Third-party framework compatibility
* Compatible with tensorflow ResizeNearestNeighborV2 operator.
* NOTE(review): the input/attribute set (roi/scales/sizes, coordinate_transformation_mode,
* cubic_coeff_a, exclude_outside, nearest_mode) matches the ONNX Resize operator —
* confirm which framework this is primarily mapped from.
*/
REG_OP(Resize)
    .INPUT(x, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                          DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(roi, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(scales, TensorType({DT_FLOAT}))
    .OPTIONAL_INPUT(sizes, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32,
                           DT_INT64, DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(coordinate_transformation_mode, String, "half_pixel")
    .ATTR(cubic_coeff_a, Float, -0.75)
    .ATTR(exclude_outside, Int, 0)
    .ATTR(extrapolation_value, Float, 0)
    .ATTR(mode, String, "nearest")
    .ATTR(nearest_mode, String, "round_prefer_floor")
    .OP_END_FACTORY_REG(Resize)
/**
*@brief Function parse image from string to int. \n

*@par Inputs:
*@li contents: A Tensor of type string. 0-D. The JPEG-encoded image. \n

*@par Attributes:
*@li channels: An optional int. Defaults to 0. Number of color channels for the decoded image.
*@li ratio: An optional int. Defaults to 1. Downscaling ratio.
*@li fancy_upscaling: An optional bool. Defaults to True. If true use a slower but nicer
upscaling of the chroma planes.
*@li try_recover_truncated: An optional bool. Defaults to False. If true try to recover an
image from truncated input.
*@li acceptable_fraction: An optional float. Defaults to 1. The minimum required fraction of
lines before a truncated input is accepted.
*@li dct_method: An optional string. Defaults to "". string specifying a hint about the
algorithm used for decompression. \n

*@par Outputs:
*image: A Tensor dtype of uint8.
*/
REG_OP(DecodeJpeg)
    .INPUT(contents, TensorType({DT_STRING}))
    .OUTPUT(image, TensorType({DT_UINT8}))
    .ATTR(channels, Int, 0)
    .ATTR(ratio, Int, 1)
    .ATTR(fancy_upscaling, Bool, true)
    .ATTR(try_recover_truncated, Bool, false)
    .ATTR(acceptable_fraction, Float, 1.0)
    .ATTR(dct_method, String, "")
    .OP_END_FACTORY_REG(DecodeJpeg)
/**
*@brief Image warping using per-pixel flow vectors. \n

*@par Inputs:
*@li image: 4-D Tensor with shape `[batch, height, width, channels]`.
*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n

*@par Outputs:
*y: Returns 4-D with the same shape and dtype as `image`. \n
*/
REG_OP(DenseImageWarp)
    .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(DenseImageWarp)
/**
*@brief Computes the gradients of DenseImageWarp with respect to image and flow. \n

*@par Inputs:
*@li grad: gradients with respect to DenseImageWarp output.
*@li image: 4-D Tensor with shape `[batch, height, width, channels]`.
*@li flow: 4-D Tensor with shape `[batch, height, width, 2]`. \n

*@par Outputs:
*@li grad_image: Returns 4-D with the same shape and dtype as `image`.
*@li grad_flow: Returns 4-D with the same shape and dtype as `flow`. \n
*/
REG_OP(DenseImageWarpGrad)
    .INPUT(grad, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(image, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(flow, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(grad_image, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(grad_flow, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(DenseImageWarpGrad)
  1272. } // namespace ge
  1273. #endif // OPS_BUILT_IN_OP_PROTO_INC_IMAGE_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示