You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

random_ops.h 24 kB

5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file random_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_
  22. #include <vector>
  23. #include "graph/operator_reg.h"
  24. namespace ge {
/**
* @brief Draws samples from a multinomial distribution. \n

* @par Inputs:
* Inputs include:
* @li logits: A Tensor. Must be one of the following types: float16, float, double.
* 2-D Tensor with shape [batch_size, num_classes].
* @li num_samples: A Tensor of type int32. 0-D. Number of independent samples to draw for each row slice. \n

* @par Attributes:
* @li dtype: An optional type from: int32, int64. Defaults to int64. The type of the output.
* @li seed: An optional int. Defaults to 0.
* @li seed2: An optional int. Defaults to 0. \n

* @par Outputs:
* y: A Tensor of type "dtype". \n

* @attention Constraints:
* The implementation for Multinomial on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow Multinomial operator.
*/
REG_OP(Multinomial)
    .INPUT(logits, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(num_samples, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(Multinomial)
/**
* @brief Outputs random values from a normal distribution, truncated to [min, max]. \n

* @par Inputs:
* Inputs include:
* @li shape: A Tensor. Must be one of the following types: int32, int64.
* The shape of the output tensor. Batches are indexed by the 0th dimension.
* @li means: A Tensor. Must be one of the following types: float16, float32, float64.
* @li stdevs: A Tensor. Must have the same type as "means".
* @li min: A Tensor. Must have the same type as "means". The minimum cutoff. May be -infinity.
* @li max: A Tensor. Must have the same type as "means". The maximum cutoff. \n

* @par Attributes:
* @li seed: An optional int. Defaults to 0.
* @li seed2: An optional int. Defaults to 0. \n

* @par Outputs:
* y: A Tensor. Has the same type as "means". \n

* @attention Constraints:
* The implementation for ParameterizedTruncatedNormal on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow ParameterizedTruncatedNormal operator.
*/
REG_OP(ParameterizedTruncatedNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(means, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(stdevs, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(min, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(max, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(ParameterizedTruncatedNormal)
/**
* @brief Computes the derivative of a Gamma random sample w.r.t. alpha. \n

* @par Inputs:
* Inputs include:
* @li alpha: A Tensor. Must be one of the following types: float32, float64.
* @li sample: A Tensor. Must have the same type as "alpha". \n

* @par Outputs:
* y: A Tensor. Has the same type as "alpha". \n

* @attention Constraints:
* The implementation for RandomGammaGrad on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomGammaGrad operator.
*/
REG_OP(RandomGammaGrad)
    .INPUT(alpha, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(sample, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(RandomGammaGrad)
/**
* @brief Outputs random values from the Gamma distribution(s) described by alpha. \n

* @par Inputs:
* Inputs include:
* @li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
* @li alpha: A Tensor. Must be one of the following types: float16, float32, float64. \n

* @par Attributes:
* @li seed: An optional int. Defaults to 0.
* @li seed2: An optional int. Defaults to 0. \n

* @par Outputs:
* y: A Tensor. Has the same type as "alpha". \n

* @attention Constraints:
* The implementation for RandomGamma on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomGamma operator.
*/
REG_OP(RandomGamma)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(alpha, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomGamma)
/**
* @brief Returns a random permutation of the integers from 0 to n-1. \n

* @par Attributes:
* @li n: A required int. The upper bound (exclusive) of the permuted range.
* @li dtype: An optional type. Defaults to int64. The type of the output.
* @li layout: An optional int. Defaults to 0. \n

* @par Outputs:
* out: A required Tensor. Must be one of the following types:
* int64, int32, int16, uint8, int8, float16, float32, double. \n

* @attention Constraints:
* The implementation for Randperm on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the PyTorch randperm operator.
*/
REG_OP(Randperm)
    .OUTPUT(out, TensorType({DT_INT64, DT_INT32, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(n, Int)
    .ATTR(layout, Int, 0)
    .ATTR(dtype, Type, DT_INT64)
    .OP_END_FACTORY_REG(Randperm)
/**
* @brief Outputs random values from the Poisson distribution(s) described by rate. \n

* @par Inputs:
* Inputs include:
* @li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
* @li rate: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64. \n

* @par Attributes:
* @li dtype: An optional type from: float16, float32, float64, int32, int64. Defaults to int64.
* @li seed: An optional int. Defaults to 0. If either seed or seed2 is set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor of type "dtype": float16, float, double, int32 or int64. \n

* @attention Constraints:
* The implementation for RandomPoisson on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomPoisson operator.
*/
REG_OP(RandomPoisson)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(rate, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomPoisson)
/**
* @brief Randomly shuffles a tensor along its first dimension. \n

* @par Inputs:
* Inputs include:
* x: A Tensor. The tensor to be shuffled. Must be one of the following types:
* int64, int32, uint16, int16, uint8, int8, float16, float, double,
* complex64, complex128, bool, string, resource. \n

* @par Attributes:
* @li seed: An optional int. Defaults to 0. If either seed or seed2 is set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor. Has the same type as "x". \n

* @attention Constraints:
* The implementation for RandomShuffle on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomShuffle operator.
*/
REG_OP(RandomShuffle)
    .INPUT(x, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomShuffle)
/**
* @brief Outputs random values from a normal distribution. \n

* @par Inputs:
* Inputs include:
* shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor. \n

* @par Attributes:
* @li dtype: A required type from: float16, float32, float64. The type of the output.
* @li seed: An optional int. Defaults to 0. If either seed or seed2 is set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor of type float16, float or double. \n

* @attention Constraints:
* The implementation for RandomStandardNormal on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomStandardNormal operator.
*/
REG_OP(RandomStandardNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomStandardNormal)
/**
* @brief Outputs random values from separate normal distributions. \n

* @par Inputs:
* Inputs include:
* @li mean: A tensor with the mean of each output element's normal distribution.
* @li std: A tensor with the standard deviation of each output element's normal distribution. \n

* @par Outputs:
* y: A Tensor. Has the same type as "mean". \n

* @attention Constraints:
* The implementation for Normal on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the PyTorch normal operator.
*/
REG_OP(Normal)
    .INPUT(mean, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(std, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(Normal)
/**
* @brief Outputs random integers from a uniform distribution. \n

* @par Inputs:
* Inputs include:
* @li shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.
* @li min: A Tensor. Must be one of the following types: int32, int64. 0-D. Inclusive lower bound.
* @li max: A Tensor. Must have the same type as "min". 0-D. Exclusive upper bound. \n

* @par Attributes:
* @li seed: An optional int. Defaults to 0. If either seed or seed2 is set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor. Has the same type as "min". \n

* @attention Constraints:
* The implementation for RandomUniformInt on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomUniformInt operator.
*/
REG_OP(RandomUniformInt)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(min, TensorType({DT_INT32, DT_INT64}))
    .INPUT(max, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniformInt)
/**
* @brief Outputs random values from a uniform distribution. \n

* @par Inputs:
* Inputs include:
* shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor. \n

* @par Attributes:
* @li dtype: A required type from: float16, float32, float64. The type of the output.
* @li seed: An optional int. Defaults to 0. If either seed or seed2 is set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor of type "dtype". \n

* @attention Constraints:
* The implementation for RandomUniform on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow RandomUniform operator.
*/
REG_OP(RandomUniform)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniform)
/**
* @brief Outputs random values from a truncated normal distribution. \n

* @par Inputs:
* Inputs include:
* shape: A Tensor. Must be one of the following types: int32, int64. \n

* @par Attributes:
* @li seed: An optional int. Defaults to 0. If either `seed` or `seed2`
* is set to be non-zero, the random number generator is seeded by the given
* seed. Otherwise, it is seeded by a random seed.
* @li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n

* @par Outputs:
* y: A Tensor of type float16, float32 or double. A tensor of the specified shape
* filled with random truncated normal values. \n

* @attention Constraints:
* The implementation for TruncatedNormal on Ascend uses AICPU, with bad performance.

* @par Third-party framework compatibility
* @li Compatible with the TensorFlow TruncatedNormal operator.
*/
REG_OP(TruncatedNormal)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(TruncatedNormal)
/**
* @brief Generates a random bit mask for dropout. \n

* @par Inputs:
* Inputs include:
* @li shape: The shape of the output tensor.
* @li prob: 0-D. The probability of a bit being 1. \n

* @par Attributes:
* @li seed: If either seed or seed2 is set to be non-zero, the random number
* generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: A second seed to avoid seed collision. \n

* @par Outputs:
* y: Output (1-D) random numbers in uint8 format. \n

* @attention Constraints:
* The output is aligned with 128 bits.

* @see DropOutGenMask()
*/
REG_OP(DropOutGenMask)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(DropOutGenMask)
/**
* @brief Generates a random uint8 mask for dropout v3. \n

* @par Inputs:
* Inputs include:
* @li shape: The shape of the output tensor.
* @li prob: 0-D. The probability of 1. \n

* @par Attributes:
* @li seed: If either seed or seed2 is set to be non-zero, the random number
* generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
* @li seed2: A second seed to avoid seed collision. \n

* @par Outputs:
* y: Output (1-D) random numbers in uint8 format. \n

* @attention Constraints:
* The output is aligned with 16.

* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.

* @see DropOutGenMaskV3()
*/
REG_OP(DropOutGenMaskV3)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(DropOutGenMaskV3)
/**
* @brief Generates evenly-spaced values in an interval. \n

* @par Inputs:
* Four ND inputs, including:
* @li assist: A 1D Tensor of type float32.
* @li start: A 1D Tensor of type float32, for the first entry in the range.
* @li stop: A 1D Tensor of type float32, for the last entry in the range.
* @li num: A 1D Tensor of type int32 or int64, for the number of entries. \n

* @par Outputs:
* output: A 1D Tensor of type float32. \n

* @attention Constraints:
* "assist" is a sequence of "num" evenly-spaced values beginning at 0 with a common difference of 1. \n

* @par Third-party framework compatibility
* Compatible with the TensorFlow operator lin_space.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use LinSpace instead.
*/
REG_OP(LinSpaceD)
    .INPUT(assist, TensorType({DT_FLOAT}))
    .INPUT(start, TensorType({DT_FLOAT}))
    .INPUT(stop, TensorType({DT_FLOAT}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(LinSpaceD)
/**
* @brief Generates evenly-spaced values in an interval. \n

* @par Inputs:
* Three ND inputs, including:
* @li start: A 1D Tensor of type float32 or double, for the first entry in the range.
* @li stop: A 1D Tensor of type float32 or double, for the last entry in the range.
* @li num: A 1D Tensor of type int32 or int64, for the number of entries. \n

* @par Outputs:
* output: A 1D Tensor. Has the same type as "start". \n

* @par Third-party framework compatibility
* Compatible with the TensorFlow operator lin_space.
*/
REG_OP(LinSpace)
    .INPUT(start, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(stop, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LinSpace)
/**
* @brief The dropout operator randomly sets (according to the given dropout probability)
* the outputs of some units to zero, while others remain unchanged. \n

* @par Inputs:
* One input, including:
* x: The input tensor variable. The data type is float32. \n

* @par Attributes:
* @li dropout_ratio: Float between 0 and 1. Fraction of the input units to drop. Defaults to "0.5".
* @li scale_train: Bool. Defaults to true.
* @li alpha: An optional float32. A scaling factor. Defaults to "1.0".
* @li beta: An optional float32. An exponent. Defaults to "0.0". \n

* @par Outputs:
* y: A Variable holding a Tensor representing the dropout; has the same shape and data type as "x". \n
*/
REG_OP(Dropout)
    .INPUT(x, TensorType{DT_FLOAT})
    .OUTPUT(y, TensorType{DT_FLOAT})
    .ATTR(dropout_ratio, Float, 0.5)
    .ATTR(scale_train, Bool, true)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(Dropout)
/**
* @brief Shuffles the indices of non-zero elements. \n

* @par Inputs:
* Inputs include:
* x: A tensor of rank <= 5, of type bool. \n

* @par Attributes:
* @li count: The count of outputs; if 0, output all non-zero elements.
* @li seed: If either seed or seed2 is set to be non-zero, the random number generator is seeded by the given seed.
* Otherwise, it is seeded by a random seed.
* @li seed2: A second seed to avoid seed collision. \n

* @par Outputs:
* @li y: 2-D tensor, indices of non-zero elements.
* @li mask: 1-D tensor, indicating whether the corresponding index is valid. \n

* @see RandomChoiceWithMask()
*/
REG_OP(RandomChoiceWithMask)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OUTPUT(mask, TensorType({DT_BOOL}))
    .ATTR(count, Int, 0)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomChoiceWithMask)
/**
* @brief Permutes data in the channel dimension of the input. \n

* @par Inputs:
* Inputs include:
* x: A required Tensor. Must be one of the following types:
* float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64. \n

* @par Attributes:
* group: A required int32, specifying the number of groups to split the channel dimension into. Defaults to "1". \n

* @par Outputs:
* y: A required Tensor. Has the same type and shape as "x". \n

* @attention Constraints:
* @li "group" must be greater than 0 and must evenly divide the channel dimension size.
* @li The format of input "x" must be NCHW.

* @par Third-party framework compatibility
* Compatible with the Caffe operator ShuffleChannel.
*/
REG_OP(ShuffleChannel)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .ATTR(group, Int, 1)
    .OP_END_FACTORY_REG(ShuffleChannel)
/**
* @brief Generates a tensor of samples from a multinomial
* distribution according to the probabilities of each of
* the possible outcomes.
*
* @par Inputs:
* One input, including:
* @li x: Input tensor with shape [batch_size, class_size],
* where class_size is the number of all possible outcomes.
* Each value along axis zero represents the unnormalized
* log-probability of each corresponding outcome in a batch.
*
* @par Attributes:
* @li dtype: An optional int selecting the output data type. Defaults to 6.
* NOTE(review): 6 presumably maps to an int32 type code — confirm against the enum used by the kernel.
* @li sample_size: An optional int, the number of times to sample. Defaults to 1.
* @li seed: An optional float used to seed the random generator. Defaults to 0.
*
* @par Outputs:
* One output, including:
* @li y: Output tensor with shape [batch_size, sample_size],
* where sample_size is the number of times to sample.
* Each value along axis zero represents the outcome of
* the corresponding sample in a batch.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(MultinomialFuss)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Int, 6)
    .ATTR(sample_size, Int, 1)
    .ATTR(seed, Float, 0)
    .OP_END_FACTORY_REG(MultinomialFuss)
/**
* @brief During training, randomly zeroes some of the elements of the input tensor
* with probability "p".
*
* @par Inputs:
* @li x: A ND Tensor. Must be one of the following data types: float, float16.
* @li seed: A ND Tensor of type float. The random-generator state.
*
* @par Attributes:
* @li p: A required float, the probability of an element being zeroed.
*
* @par Outputs:
* @li y: A tensor with the same shape and type as "x".
* @li mask: A tensor of type float with the same shape as "x".
* @li seed: A tensor with the same type as the input "seed" (the updated generator state).
*/
REG_OP(DropoutV2)
    .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .INPUT(seed, TensorType({ DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(mask, TensorType({ DT_FLOAT }))
    .OUTPUT(seed, TensorType({ DT_FLOAT }))
    .REQUIRED_ATTR(p, Float)
    .OP_END_FACTORY_REG(DropoutV2)
/**
* @brief The Bernoulli distribution with probability "p". \n

* @par Inputs:
* @li x: A ND Tensor. Must be one of the following data types:
* int8, uint8, int16, int32, int64, bool, float32, float64.
* @li p: A ND Tensor, the probability of drawing 1.
* Must be one of the following data types: float32, float64. \n

* @par Attributes:
* seed: An integer, the seed of the random generator. Default value -1
* to use the current timestamp; otherwise it should be a positive integer. \n

* @par Outputs:
* y: A tensor with the same shape and type as "x".
*/
REG_OP(Bernoulli)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
    .INPUT(p, TensorType({ DT_FLOAT, DT_DOUBLE }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, -1)
    .OP_END_FACTORY_REG(Bernoulli)
/**
* @brief Fills the input tensor with values drawn from the uniform distribution U(from, to). \n

* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float, double. \n

* @par Attributes:
* @li from: The lower bound of the uniform distribution. Defaults to 0.0.
* @li to: The upper bound of the uniform distribution. Defaults to 1.0. \n

* @par Outputs:
* y: A Tensor. Has the same type as "x". \n
*/
REG_OP(Uniform)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(from, Float, 0.0)
    .ATTR(to, Float, 1.0)
    .OP_END_FACTORY_REG(Uniform)
  564. } // namespace ge
  565. #endif // OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示