You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

random_ops.h 31 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
3 years ago
5 years ago
3 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
3 years ago
5 years ago
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file random_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_
  22. #include <vector>
  23. #include "graph/operator_reg.h"
  24. namespace ge {
/**
*@brief Draws samples from a multinomial distribution . \n
*@par Inputs:
*Inputs include:
*@li logits: A Tensor. Must be one of the following types: float16, float, double.
* 2-D Tensor with shape [batch_size, num_classes].
*@li num_samples: A Tensor of type int32. 0-D. Number of independent samples to draw for each row slice . \n
*@par Attributes:
*@li dtype: An optional type from: int32, int64. Type of the output. Defaults to int64.
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0 . \n
*@par Outputs:
*y: A Tensor of type "dtype" (int32 or int64) . \n
*@attention Constraints:
*The implementation for Multinomial on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow Multinomial operator.
*/
REG_OP(Multinomial)
    .INPUT(logits, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(num_samples, TensorType({DT_INT32}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(Multinomial)
/**
*@brief Draws samples from a multinomial distribution prepared as an alias table. \n
*@par Inputs:
*Inputs include:
*@li q: A Tensor. Must be one of the following types: float, double.
* 1-D Tensor with shape [num_classes]. Acceptance probabilities, as produced by MultinomialAliasSetup.
*@li j: A Tensor of type int64.
* 1-D Tensor with shape [num_classes]. Alias indices, as produced by MultinomialAliasSetup. \n
*@par Attributes:
*@li num_samples: A required int. Number of independent samples to draw.
*@li seed: An optional int. Defaults to 0. \n
*@par Outputs:
*y: A Tensor of type int64. 1-D Tensor of sampled class indices. \n
*@attention Constraints:
*The implementation for MultinomialAliasDraw on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with torch _multinomial_alias_draw operator.
*/
REG_OP(MultinomialAliasDraw)
    .INPUT(q, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(j, TensorType({DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT64}))
    .REQUIRED_ATTR(num_samples, Int)
    .ATTR(seed, Int, 0)
    .OP_END_FACTORY_REG(MultinomialAliasDraw)
/**
*@brief Prepares the alias table (j, q) consumed by MultinomialAliasDraw. \n
*@par Inputs:
*Inputs include:
*@li probs: A Tensor. Must be one of the following types: float, double.
* 1-D Tensor with shape [num_classes]. Sampling probabilities for each class. \n
*@par Outputs:
*@li j: A Tensor of type int64.
* 1-D Tensor with shape [num_classes]. Alias indices.
*@li q: A Tensor. Same element type as "probs" (float or double).
* 1-D Tensor with shape [num_classes]. Acceptance probabilities. \n
*@attention Constraints:
*The implementation for MultinomialAliasSetup on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with torch _multinomial_alias_setup operator.
*/
REG_OP(MultinomialAliasSetup)
    .INPUT(probs, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(j, TensorType({DT_INT64}))
    .OUTPUT(q, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(MultinomialAliasSetup)
/**
*@brief Outputs random values from a normal distribution, truncated to [min, max] . \n
*@par Inputs:
*Inputs include:
*@li shape: A Tensor. Must be one of the following types: int32, int64.
* The shape of the output tensor. Batches are indexed by the 0th dimension.
*@li means: A Tensor. Must be one of the following types: float16, float32, float64.
*@li stdevs: A Tensor. Must have the same type as means.
*@li min: A Tensor. Must have the same type as means. The minimum cutoff. May be -infinity.
*@li max: A Tensor. Must have the same type as means. The maximum cutoff . \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0 . \n
*@par Outputs:
*y: A Tensor. Has the same type as means . \n
*@attention Constraints:
*The implementation for ParameterizedTruncatedNormal on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow ParameterizedTruncatedNormal operator.
*/
REG_OP(ParameterizedTruncatedNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(means, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(stdevs, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(min, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(max, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(ParameterizedTruncatedNormal)
/**
*@brief Computes the derivative of a Gamma random sample w.r.t. alpha . \n
*@par Inputs:
*Inputs include:
*@li alpha: A Tensor. Must be one of the following types: float32, float64.
*@li sample: A Tensor. Must have the same type as alpha . \n
*@par Outputs:
*y: A Tensor. Has the same type as alpha . \n
*@attention Constraints:
*The implementation for RandomGammaGrad on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomGammaGrad operator.
*/
REG_OP(RandomGammaGrad)
    .INPUT(alpha, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(sample, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(RandomGammaGrad)
/**
*@brief Outputs random values from the Gamma distribution(s) described by alpha . \n
*@par Inputs:
*Inputs include:
*@li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
*@li alpha: A Tensor. Must be one of the following types: float16, float32, float64 . \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0.
*@li seed2: An optional int. Defaults to 0 . \n
*@par Outputs:
*y: A Tensor. Has the same type as alpha . \n
*@attention Constraints:
*The implementation for RandomGamma on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomGamma operator.
*/
REG_OP(RandomGamma)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(alpha, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomGamma)
/**
*@brief Returns a random permutation of integers from 0 to n-1. \n
*@par Attributes:
*@li n: A required int. Upper bound (exclusive) of the permuted range.
*@li dtype: An optional type. Defaults to int64.
*@li layout: An optional int. Defaults to 0 . \n
*@par Outputs:
*out: A required Tensor. Must be one of the following types:
* int64, int32, int16, uint8, int8, float16, float32, double. \n
*@attention Constraints:
*The implementation for Randperm on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with Pytorch Randperm operator.
*/
REG_OP(Randperm)
    // NOTE(review): DT_FLOAT32 is unusual — the rest of this file uses DT_FLOAT
    // for float32. Confirm this enumerator exists in the ge DataType enum.
    .OUTPUT(out, TensorType({DT_INT64, DT_INT32, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT32, DT_DOUBLE}))
    .REQUIRED_ATTR(n, Int)
    .ATTR(layout, Int, 0)
    .ATTR(dtype, Type, DT_INT64)
    .OP_END_FACTORY_REG(Randperm)
/**
*@brief Fills a tensor with elements drawn from the poisson distribution. \n
*@par Inputs:
*x: A Tensor of rate parameters. Must be one of the following types: float16, float. \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0. \n
*@par Outputs:
*y: A Tensor with same type as "x" . \n
*@par Third-party framework compatibility
*@li Compatible with the Pytorch operator Poisson.
*/
REG_OP(Poisson)
    .INPUT(x, TensorType({ DT_FLOAT16,DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT16,DT_FLOAT }))
    .ATTR(seed, Int, 0)
    .OP_END_FACTORY_REG(Poisson)
/**
*@brief Outputs random values from the Poisson distribution(s) described by rate . \n
*@par Inputs:
*Inputs include:
*@li shape: A Tensor. Must be one of the following types: int32, int64. 1-D integer tensor.
*@li rate: A Tensor. Must be one of the following types: float16, float32, float64, int32, int64 . \n
*@par Attributes:
*@li dtype: An optional type from: float16, float32, float64, int32, int64. Defaults to int64.
*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor of type "dtype" (float16, float, double, int32 or int64). \n
*@attention Constraints:
*The implementation for RandomPoisson on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomPoisson operator.
*/
REG_OP(RandomPoisson)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(rate, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE, \
        DT_INT32, DT_INT64}))
    .ATTR(dtype, Type, DT_INT64)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomPoisson)
/**
*@brief Randomly shuffles a tensor along its first dimension . \n
*@par Inputs:
*Inputs include:
*x: A Tensor to be shuffled. Must be one of the following types: int64, int32, uint16, int16,
* uint8, int8, float16, float, double, complex64, complex128, bool, string, resource . \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor. Has the same type and shape as x . \n
*@attention Constraints:
*The implementation for RandomShuffle on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomShuffle operator.
*/
REG_OP(RandomShuffle)
    .INPUT(x, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .OUTPUT(y, TensorType({DT_INT64, DT_INT32, DT_UINT16, DT_INT16,
        DT_UINT8, DT_INT8, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64,
        DT_COMPLEX128, DT_BOOL, DT_STRING, DT_RESOURCE}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomShuffle)
/**
*@brief Outputs random values from a normal distribution . \n
*@par Inputs:
*Inputs include:
*shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor . \n
*@par Attributes:
*@li dtype: A required type from: float16, float32, float64. The type of the output.
*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor of type float16, float32 or double. \n
*@attention Constraints:
*The implementation for RandomStandardNormal on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomStandardNormal operator.
*/
REG_OP(RandomStandardNormal)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomStandardNormal)
/**
*@brief Outputs random values drawn from separate normal distributions. \n
*@par Inputs:
*Inputs include:
*@li mean: A Tensor holding the mean of each output element's normal distribution.
*@li std: A Tensor holding the standard deviation of each output element's normal distribution. \n
*@par Outputs:
*y: A Tensor. Has the same type as mean (float16, float32 or double) . \n
*@attention Constraints:
*The implementation for Normal on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with Pytorch Normal operator.
*/
REG_OP(Normal)
    .INPUT(mean, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(std, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(Normal)
/**
*@brief Outputs random integers from a uniform distribution . \n
*@par Inputs:
*Inputs include:
*@li shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor.
*@li min: A Tensor. Must be one of the following types: int32, int64. 0-D. Lower bound (inclusive).
*@li max: A Tensor. Must have the same type as min. 0-D. Upper bound (exclusive) . \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor. Has the same type as min . \n
*@attention Constraints:
*The implementation for RandomUniformInt on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomUniformInt operator.
*/
REG_OP(RandomUniformInt)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .INPUT(min, TensorType({DT_INT32, DT_INT64}))
    .INPUT(max, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniformInt)
/**
*@brief Outputs random values from a uniform distribution . \n
*@par Inputs:
*Inputs include:
*shape: A Tensor. Must be one of the following types: int32, int64. The shape of the output tensor . \n
*@par Attributes:
*@li dtype: A required type from: float16, float32, float64. The type of the output.
*@li seed: An optional int. Defaults to 0. If either seed or seed2 are set to be non-zero,
* the random number generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor of type "dtype" . \n
*@attention Constraints:
*The implementation for RandomUniform on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow RandomUniform operator.
*/
REG_OP(RandomUniform)
    .INPUT(shape, TensorType({DT_INT32, DT_INT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(dtype, Type)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomUniform)
/**
*@brief Outputs random values from a truncated normal distribution . \n
*@par Inputs:
*Inputs include:
*shape: A Tensor. Must be one of the following types: int32, int64 . \n
*@par Attributes:
*@li seed: An optional int. Defaults to 0. If either `seed` or `seed2`
* are set to be non-zero, the random number generator is seeded by the given
* seed. Otherwise, it is seeded by a random seed.
*@li seed2: An optional int. Defaults to 0. A second seed to avoid seed collision. \n
*@par Outputs:
*y: A Tensor of types: float16, float32, double. A tensor of the specified shape
* filled with random truncated normal values. \n
*@attention Constraints:
*The implementation for TruncatedNormal on Ascend uses AICPU, with bad performance.
*@par Third-party framework compatibility
*@li compatible with tensorflow TruncatedNormal operator.
*/
REG_OP(TruncatedNormal)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(TruncatedNormal)
/**
*@brief Generate random bit mask for dropout . \n
*@par Inputs:
*Inputs include:
*@li shape: The shape of the output tensor. A Tensor of type int32 or int64.
*@li prob: 0-D. Probability of a bit being 1 (float16 or float) . \n
*@par Attributes:
*@li seed: If either seed or seed2 are set to be non-zero, the random number
* generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: A second seed to avoid seed collision . \n
*@par Outputs:
*y: Output (1-D) random number using uint8 data format . \n
*@attention Constraints:
*The output is aligned with 128 bits
*@see DropOutGenMask()
*/
REG_OP(DropOutGenMask)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(DropOutGenMask)
/**
*@brief Generate random uint8 mask for dropout v3 . \n
*@par Inputs:
*Inputs include:
*@li shape: The shape of the output tensor. A Tensor of type int32 or int64.
*@li prob: 0-D. Probability of 1 (float16 or float) . \n
*@par Attributes:
*@li seed: If either seed or seed2 are set to be non-zero, the random number
* generator is seeded by the given seed. Otherwise, it is seeded by a random seed.
*@li seed2: A second seed to avoid seed collision . \n
*@par Outputs:
*y: Output (1-D) random number using uint8 data format . \n
*@attention Constraints:
*The output is aligned with 16
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*@see DropOutGenMaskV3()
*/
REG_OP(DropOutGenMaskV3)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(DropOutGenMaskV3)
/**
* @brief Generate stateless random bit mask for dropout . \n
* @par Inputs:
* Inputs include:
* @li shape: The shape of the output tensor. A Tensor of type int32 or int64.
* @li prob: 0-D. Probability of a bit being 1 (float16 or float).
* @li seed: First seed to avoid seed collision.
* @li seed1: Second seed to avoid seed collision.
* @li offset: Optional. Initial offset of random number . \n
* @par Outputs:
* y: Output (1-D) random number using uint8 data format . \n
* @attention Constraints:
* The output is aligned with 128 bits
* @see StatelessDropOutGenMask()
*/
REG_OP(StatelessDropOutGenMask)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .INPUT(seed, TensorType({ DT_INT32, DT_INT64 }))
    .INPUT(seed1, TensorType({ DT_INT32, DT_INT64 }))
    .OPTIONAL_INPUT(offset, TensorType({ DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_UINT8 }))
    .OP_END_FACTORY_REG(StatelessDropOutGenMask)
/**
* @brief Generate bernoulli distribution for tensor input . \n
* @par Inputs:
* Inputs include:
* @li shape: The shape of the output tensor. A Tensor of type int32, int64.
* @li prob: 0-D. Probability of 1 (float16, float or double).
* @li seed: If seed is set to be -1, and offset is set to be 0, the random number
* generator is seeded by a random seed. Otherwise, it is seeded by the given seed.
* @li offset: To avoid seed collision . \n
* @par Outputs:
* y: A Tensor. A Tensor of type int8, uint8, int16, uint16,
* int32, uint32, int64, uint64, bool, float16, float, double, bf16. \n
*/
REG_OP(StatelessBernoulli)
    .INPUT(shape, TensorType({ DT_INT32, DT_INT64}))
    .INPUT(prob, TensorType({ DT_FLOAT16, DT_FLOAT, DT_DOUBLE }))
    .INPUT(seed, TensorType({ DT_INT64 }))
    .INPUT(offset, TensorType({ DT_INT64 }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_UINT16, DT_INT32, DT_UINT32,
        DT_INT64, DT_UINT64, DT_BOOL, DT_FLOAT16, DT_FLOAT, DT_DOUBLE, DT_BF16}))
    .OP_END_FACTORY_REG(StatelessBernoulli)
/**
*@brief Generates values in an interval . \n
*@par Inputs:
* Four ND inputs, including:
*@li assist: A 1D Tensor of type float32.
*@li start: A 1D Tensor of type float32, for the first entry in the range.
*@li stop: A 1D Tensor of type float32, for the last entry in the range.
*@li num: A 1D Tensor of type int32 or int64, for the number of entries . \n
*@par Outputs:
*output: A 1D Tensor of type float32 . \n
*@attention Constraints:
* "assist" is a sequence of "num" evenly-spaced values beginning at 0 with a common difference of 1 . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator lin_space.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS DEPRECATED. Please use LinSpace instead.
*/
REG_OP(LinSpaceD)
    .INPUT(assist, TensorType({DT_FLOAT}))
    .INPUT(start, TensorType({DT_FLOAT}))
    .INPUT(stop, TensorType({DT_FLOAT}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(LinSpaceD)
/**
*@brief Generates values in an interval . \n
*@par Inputs:
* Three ND inputs, including:
*@li start: A 1D Tensor of type float32 or double, for the first entry in the range.
*@li stop: A 1D Tensor of type float32 or double, for the last entry in the range.
*@li num: A 1D Tensor of type int32 or int64, for the number of entries . \n
*@par Outputs:
*output: A 1D Tensor of type float32 or double . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator lin_space.
*/
REG_OP(LinSpace)
    .INPUT(start, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(stop, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(num, TensorType::IndexNumberType())
    .OUTPUT(output, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LinSpace)
/**
*@brief The dropout operator randomly sets (according to the given dropout probability)
* the outputs of some units to zero, while others remain unchanged. \n
*@par Inputs:
*One input, including:
*@li x: The input tensor variable. The data type is float32. \n
*@par Attributes:
*@li dropout_ratio: Float between 0 and 1. Fraction of the input units to drop. Defaults to "0.5".
*@li scale_train: Bool, defaults to true.
*@li alpha: An optional float32. A scaling factor. Defaults to "1.0".
*@li beta: An optional float32. An exponent. Defaults to "0.0". \n
*@par Outputs:
*y: A Variable holding Tensor representing the dropout, has same shape and data type with x. \n
*/
REG_OP(Dropout)
    .INPUT(x, TensorType{DT_FLOAT})
    .OUTPUT(y, TensorType{DT_FLOAT})
    .ATTR(dropout_ratio, Float, 0.5)
    .ATTR(scale_train, Bool, true)
    .ATTR(alpha, Float, 1.0)
    .ATTR(beta, Float, 0.0)
    .OP_END_FACTORY_REG(Dropout)
/**
*@brief Shuffle index of no-zero element . \n
*@par Inputs:
*Inputs include:
*x: A bool tensor with rank <= 5-D . \n
*@par Attributes:
*@li count: The count of output; if 0, output all no-zero elements.
*@li seed: If either seed or seed2 are set to be non-zero, the random number generator is seeded by the given seed.
* Otherwise, it is seeded by a random seed.
*@li seed2: A second seed to avoid seed collision . \n
*@par Outputs:
*@li y: 2-D tensor of int32, no-zero element index.
*@li mask: 1-D bool tensor, whether the corresponding index is valid . \n
*@see RandomChoiceWithMask()
*/
REG_OP(RandomChoiceWithMask)
    .INPUT(x, TensorType({DT_BOOL}))
    .OUTPUT(y, TensorType({DT_INT32}))
    .OUTPUT(mask, TensorType({DT_BOOL}))
    .ATTR(count, Int, 0)
    .ATTR(seed, Int, 0)
    .ATTR(seed2, Int, 0)
    .OP_END_FACTORY_REG(RandomChoiceWithMask)
/**
*@brief Permutes data in the channel dimension of the input
*@par Inputs:
*Inputs including:
*@li x: A required Tensor. Must be one of the following types:
* float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n
*@par Attributes:
*@li group: A required int32, specifying the number of groups to split the channel dimension into. Defaults to "1" . \n
*@par Outputs:
*@li y: A required Tensor. Has same type and shape as "x". Must be one of the following types:
* float16, float32, int8, uint8, int16, uint16, int32, uint32, int64, uint64 . \n
*@attention Constraints:
*@li "group" must be greater than 0 and must evenly divide the channel dimension size.
*@li The format of input "x" must be NCHW.
*@par Third-party framework compatibility
* Compatible with the Caffe operator ShuffleChannel.
*/
REG_OP(ShuffleChannel)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT,DT_INT8, DT_UINT8, DT_INT16,
        DT_UINT16, DT_INT32, DT_UINT32,DT_INT64,DT_UINT64}))
    .ATTR(group, Int, 1)
    .OP_END_FACTORY_REG(ShuffleChannel)
/**
* @brief Generate a tensor of samples from a multinomial
* distribution according to the probabilities of each of
* the possible outcomes.
*
* @par Inputs:
* one input including:
* @li x: Input tensor with shape [batch_size, class_size],
* where class_size is the number of all possible outcomes.
* Each value along the axis zero represents the unnormalized
* log-probability of each corresponding outcome in a batch.
*
* @par Attributes:
* @li dtype: An optional int selecting the output type. Defaults to 6.
* @li sample_size: An optional int, the number of times to sample. Defaults to 1.
* @li seed: An optional float. Defaults to 0.
*
* @par Outputs:
* one output including:
* @li y: Output tensor with shape [batch_size, sample_size],
* where sample_size is the number of times to sample.
* Each value along the axis zero represents the outcome of
* the corresponding sample in a batch.
*
* @par Restrictions:
* Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(MultinomialFuss)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_FLOAT64}))
    .OUTPUT(y, TensorType({DT_INT32, DT_INT64}))
    .ATTR(dtype, Int, 6)
    .ATTR(sample_size, Int, 1)
    .ATTR(seed, Float, 0)
    .OP_END_FACTORY_REG(MultinomialFuss)
/**
* @brief During training, randomly zeroes some of the elements of the input
* tensor with probability "p".
*
* @par Inputs:
* @li x: A ND Tensor. Must be one of the following data types: Float, Float16
* @li seed: A ND Tensor. Must be one of the following data types: Float
*
* @par Attributes:
* @li p: A required float, the probability of an element to be zeroed.
*
* @par Outputs:
* @li y: A tensor with the same shape and type as "x".
* @li mask: A tensor with the same shape as "x", of data type float.
*   (NOTE(review): the original doc said "same type as x", but the port is
*   registered as DT_FLOAT only, while "x" may be float16.)
* @li seed: An updated seed tensor with the same shape and type as input "seed".
*   NOTE(review): this output port is named "seed", duplicating the input port
*   name; earlier doc called it "new_seed" — confirm which name the IR and
*   existing graphs actually depend on before renaming.
*/
REG_OP(DropoutV2)
    .INPUT(x, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .INPUT(seed, TensorType({ DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT }))
    .OUTPUT(mask, TensorType({ DT_FLOAT }))
    .OUTPUT(seed, TensorType({ DT_FLOAT }))
    .REQUIRED_ATTR(p, Float)
    .OP_END_FACTORY_REG(DropoutV2)
/**
* @brief The Bernoulli distribution with probability "p" . \n

* @par Inputs:
* @li x: A ND Tensor. Must be one of the following data types:
int8, uint8, int16, int32, int64, bool, float32, float64 .
* @li p: A ND Tensor, the probability parameter of the Bernoulli distribution.
(NOTE(review): the original comment said "the probability of an element to be
zeroed", which contradicts standard Bernoulli semantics where p is the
probability of drawing 1 — confirm against the kernel.)
Must be one of the following data types: float32, float64. \n

* @par Attributes:
* seed: An integer, the seed of the random generator. Default value -1
to use current timestamp, otherwise it should be a positive integer.

* @par Outputs:
* y: A tensor with the same shape and type as "x".
*/
REG_OP(Bernoulli)
    .INPUT(x, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
    .INPUT(p, TensorType({ DT_FLOAT, DT_DOUBLE }))
    .OUTPUT(y, TensorType({ DT_INT8, DT_UINT8, DT_INT16, DT_INT32, DT_INT64, DT_BOOL, DT_FLOAT, DT_DOUBLE}))
    .ATTR(seed, Int, -1)
    .OP_END_FACTORY_REG(Bernoulli)
/**
* @brief Fills the input tensor with values drawn from the uniform
* distribution U(from, to) . \n

* @par Inputs:
* x: A Tensor. Must be one of the following types: float16, float, double. \n

* @par Attributes:
* @li from: An optional float, the lower bound of the uniform range. Defaults to 0.0
* @li to: An optional float, the upper bound of the uniform range. Defaults to 1.0 \n

* @par Outputs:
* y: A Tensor with the same type (and shape) as "x". \n
*/
REG_OP(Uniform)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(from, Float, 0.0)
    .ATTR(to, Float, 1.0)
    .OP_END_FACTORY_REG(Uniform)
/**
*@brief Outputs integers consisting of 0 and 1, used for lstm etc. \n

*@par Attributes:
* @li time_step: A required int, the size of the first output dimension.
* @li batch_size: A required int, the size of the second output dimension.
* (NOTE(review): the original doc described these as 0-D int64 tensor inputs,
* but they are registered as required attributes — the op takes no inputs.)

*@par Outputs:
*y: A Tensor of type float, 2-D, with shape [time_step, batch_size]. \n
* (NOTE(review): the original doc also mentioned float16, but only DT_FLOAT
* is registered on the output port — confirm whether float16 support is
* intended.)

*@par Third-party framework compatibility
* Compatible with the Caffe operator ContinuationIndicator.
*/
REG_OP(ContinuationIndicator)
    .REQUIRED_ATTR(time_step, Int)
    .REQUIRED_ATTR(batch_size, Int)
    .OUTPUT(y, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(ContinuationIndicator)
/**
*@brief Outputs random values from the Exponential distribution(s) described
by rate . \n

*@par Inputs:
*Inputs include:
* @li x: A Tensor. Must be one of the following types: half, float32, float64. \n

*@par Attributes:
*@li lambda: An optional float, the rate parameter of the distribution.
Defaults to 1.
*@li seed: An optional int. Defaults to 0. If non-zero, the random number
generator is seeded by the given seed; otherwise, it is seeded by a random
seed. \n

*@par Outputs:
*y: A Tensor of type float16, float, or double. \n

*@attention Constraints:
*The implementation for Exponential on Ascend uses AICPU, with bad performance.

*@par Third-party framework compatibility
*@li compatible with tensorflow Exponential operator.
*/
REG_OP(Exponential)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(lambda, Float, 1)
    .ATTR(seed, Int, 0)
    .OP_END_FACTORY_REG(Exponential)
/**
*@brief Fills a tensor with elements drawn from the geometric distribution. \n

*@par Inputs:
*x: A Tensor. Must be one of the following types: float16, float. \n

*@par Attributes:
*@li p: A required float, the probability of success in the underlying
Bernoulli trial of the geometric distribution.
*@li seed: An optional int, the seed of the random generator. Defaults to 0. \n

*@par Outputs:
*y: A Tensor with the same type (and shape) as "x" . \n

*@par Third-party framework compatibility
* Compatible with the Pytorch operator Geometric (Tensor.geometric_).
*/
REG_OP(Geometric)
    .INPUT(x, TensorType({ DT_FLOAT16,DT_FLOAT }))
    .OUTPUT(y, TensorType({ DT_FLOAT16,DT_FLOAT }))
    .REQUIRED_ATTR(p, Float)
    .ATTR(seed, Int, 0)
    .OP_END_FACTORY_REG(Geometric)
  727. } // namespace ge
  728. #endif // OPS_BUILT_IN_OP_PROTO_INC_RANDOM_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示