
nonlinear_fuc_ops.h 30 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file nonlinear_fuc_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_

#include "graph/operator_reg.h"

namespace ge {

/**
*@brief Computes the gelu of "x" . \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Gelu
*/
REG_OP(Gelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(Gelu)
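
// A minimal scalar reference sketch of Gelu (illustrative only, not the
// device kernel): the widely used tanh approximation,
// y = 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
// Assumes <cmath> is available; the kernel may use a different formulation.
inline float GeluRef(float x) {
  const float kAlpha = 0.7978845608028654f;  // sqrt(2 / pi)
  return 0.5f * x * (1.0f + std::tanh(kAlpha * (x + 0.044715f * x * x * x)));
}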

/**
*@brief Computes the gradient for the gelu of "x" . \n
*@par Inputs:
*Three inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32
* @li x: A Tensor of the same type as "dy".
* @li y: A Tensor of the same type as "dy" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "dy".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator GeluGrad
*/
REG_OP(GeluGrad)
    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(GeluGrad)

/**
*@brief Computes the fast_gelu of "x" . \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FastGelu
*/
REG_OP(FastGelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(FastGelu)
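
// A minimal scalar reference sketch of FastGelu (illustrative only): one
// common fast-gelu formulation is the sigmoid approximation
// y = x * sigmoid(1.702 * x); whether the kernel uses exactly this form is
// an assumption. Requires <cmath>.
inline float FastGeluRef(float x) {
  return x / (1.0f + std::exp(-1.702f * x));
}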

/**
*@brief Computes the gradient for the fast_gelu of "x" . \n
*@par Inputs:
*Two inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32
* @li x: A Tensor of the same type as "dy" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "dy".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FastGeluGrad
*/
REG_OP(FastGeluGrad)
    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(FastGeluGrad)

/**
*@brief Computes the gradient for the tanh of "x" . \n
*@par Inputs:
*Two inputs, including:
* @li y: A Tensor. Must be one of the following types: float16, float32,
* double, complex64, complex128.
* @li dy: A Tensor of the same type as "y" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "y".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator TanhGrad.
*/
REG_OP(TanhGrad)
    .INPUT(y, TensorType::UnaryDataType())
    .INPUT(dy, TensorType::UnaryDataType())
    .OUTPUT(z, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(TanhGrad)
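
// A minimal scalar reference sketch of TanhGrad (illustrative only): the
// backward of tanh expressed through the forward output y, z = dy * (1 - y^2).
inline float TanhGradRef(float y, float dy) {
  return dy * (1.0f - y * y);
}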

/**
*@brief: Computes hyperbolic tangent of "x" element-wise . \n
*@par Inputs:
*One input:
*x: A Tensor. Must be one of the following types: float16, float32, complex64, complex128, double . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Tanh.
*/
REG_OP(Tanh)
    .INPUT(x, TensorType::UnaryDataType())
    .OUTPUT(y, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(Tanh)

/**
* @brief Computes rectified linear: "max(x, 0)".
*
* @par Inputs:
* x: A tensor. Must be one of the following types: float32, float64, int32, uint8,
* int16, int8, int64, uint16, float16, qint8.
*
* @par Outputs:
* y: A tensor. Has the same type as "x".
*
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator Relu.
* @li Compatible with the Caffe operator ReLULayer.
*
*/
REG_OP(Relu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
                          DT_INT8, DT_INT32, DT_INT16, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
                           DT_INT8, DT_INT32, DT_INT16, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_QINT8}))
    .OP_END_FACTORY_REG(Relu)

/**
* @brief Computes rectified linear 6.
* activations = min(max(x, 0), 6) . \n
* @par Inputs:
* x: A Tensor of type RealNumberType . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6.
*/
REG_OP(Relu6)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(Relu6)
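
// A minimal scalar reference sketch of Relu6 (illustrative only):
// the activation is clamped to [0, 6], y = min(max(x, 0), 6).
inline float Relu6Ref(float x) {
  return x < 0.0f ? 0.0f : (x > 6.0f ? 6.0f : x);
}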

/**
* @brief Computes rectified linear 6*scale.
* activations = min(max(x, 0), 6*scale) . \n
* @par Inputs:
* x: A Tensor of type RealNumberType . \n
* @par Attributes:
* scale: An optional float. Defaults to "1.0" . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Relu6 instead.
*/
REG_OP(Relu6D)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .ATTR(scale, Float, 1.0)
    .OP_END_FACTORY_REG(Relu6D)

/**
* @brief Computes rectified linear 6 gradients for a Relu6 operation.
* backprops = gradients * (features > 0) * (features < 6) . \n
* @par Inputs:
* @li features: A Tensor of type RealNumberType.
* @li gradients: A Tensor of type RealNumberType . \n
* @par Outputs:
* backprops: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6Grad.
*/
REG_OP(Relu6Grad)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(features, TensorType::RealNumberType())
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(Relu6Grad)
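
// A minimal scalar reference sketch of Relu6Grad (illustrative only): the
// incoming gradient passes only where the forward input lay strictly inside
// the linear region, backprops = gradients * (features > 0) * (features < 6).
inline float Relu6GradRef(float gradient, float feature) {
  return (feature > 0.0f && feature < 6.0f) ? gradient : 0.0f;
}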

/**
*@brief Calculate the elu_grad_v2 function.
*Computes the backward of elu: grads if x > 0; otherwise grads * (activations + alpha) .
*@par Inputs:
*Two inputs, including:
* @li grads: A tensor. Must be one of the following types:
* float16, float32.
* @li activations: A tensor. Must be one of the following types:
* float16, float32.
*
*@par Outputs:
*y: A Tensor with the same type and shape as "grads".
*
*@par Attributes:
*@li alpha: scalar parameter, default value = 1.0
*/
REG_OP(EluGradV2)
    .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(activations, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(EluGradV2)

/**
* @brief Compute sigmoid of "x" element-wise . \n
* @par Inputs:
* x: A Tensor of type complex64, complex128, float16, float32 or double . \n
* @par Outputs:
* y: A Tensor. Has the same type as "x" . \n
* @see Relu()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Sigmoid.
*/
REG_OP(Sigmoid)
    .INPUT(x, TensorType::UnaryDataType())
    .OUTPUT(y, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(Sigmoid)

/**
* @brief Computes z = (y - y*y)*dy . \n
* @par Inputs:
* @li y: The input is Tensor, dtype is UnaryDataType.
* @li dy: The input is Tensor, dtype is UnaryDataType . \n
* @par Outputs:
* z: The output is Tensor, dtype is UnaryDataType.
*/
REG_OP(SigmoidGrad)
    .INPUT(y, TensorType::UnaryDataType())
    .INPUT(dy, TensorType::UnaryDataType())
    .OUTPUT(z, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(SigmoidGrad)
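
// A minimal scalar reference sketch of SigmoidGrad (illustrative only):
// z = (y - y*y) * dy, i.e. the backward of sigmoid via its forward output y.
inline float SigmoidGradRef(float y, float dy) {
  return (y - y * y) * dy;
}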

/**
*@brief Computes the binomial normal log likelihood (BNLL) output:
*if x>0, x+log(1+exp(-x)); otherwise log(1+exp(x)) . \n
*@par Inputs:
*x: A Tensor of type double, float16 or float32 . \n
*@par Outputs:
*y: A tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the Caffe operator BNLL.
*/
REG_OP(BNLL)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(BNLL)

/**
*@brief Computes softplus: log(exp(x) + 1) . \n
*@par Inputs:
* One input:
*x: A Tensor of type float16 or float32. Up to 8D . \n
*@par Outputs:
*y: The activations tensor. Has the same type and format as input "x"
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Softplus.
*/
REG_OP(Softplus)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Softplus)
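
// A minimal scalar reference sketch of Softplus (illustrative only): the
// textbook log(exp(x) + 1) overflows for large x, so it is computed in the
// standard stable form max(x, 0) + log1p(exp(-|x|)). Requires <cmath>.
inline float SoftplusRef(float x) {
  return (x > 0.0f ? x : 0.0f) + std::log1p(std::exp(-std::fabs(x)));
}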

/**
*@brief Computes softplus gradients for a softplus operation . \n
*@par Inputs:
*Two inputs:
* @li gradients: An NC1HWC0 or ND Tensor of type float16 or float32.
* @li features: An NC1HWC0 or ND Tensor of type float16 or float32.
*@par Outputs:
*backprops: A Tensor. Has the same type and format as input "gradients" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SoftplusGrad.
*/
REG_OP(SoftplusGrad)
    .INPUT(gradients, TensorType::FloatingDataType())
    .INPUT(features, TensorType::FloatingDataType())
    .OUTPUT(backprops, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(SoftplusGrad)

/**
*@brief Computes softsign: x/(abs(x) + 1) . \n
*@par Inputs:
* One input:
*x: A Tensor of type float16 or float32. Up to 8D . \n
*@par Outputs:
*y: The activations tensor. Has the same type and format as "x"
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Softsign.
*/
REG_OP(Softsign)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Softsign)

/**
*@brief Computes scaled exponential linear:
* scale * x if x > 0; scale * alpha * (exp(x) - 1) otherwise . \n
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: float16, float, double,
* int32, int8. format:ND, NC1HWC0 . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x". format:ND, NC1HWC0 . \n
*@see Region()
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Selu.
*/
REG_OP(Selu)
    .INPUT(x, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,
                          DT_INT8,DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16,DT_FLOAT,DT_DOUBLE,
                           DT_INT8,DT_INT32}))
    .OP_END_FACTORY_REG(Selu)
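
// A minimal scalar reference sketch of Selu (illustrative only), using the
// standard constants from Klambauer et al., "Self-Normalizing Neural
// Networks" (2017). Requires <cmath>.
inline float SeluRef(float x) {
  const float kAlpha = 1.6732632423543772f;
  const float kScale = 1.0507009873554805f;
  return kScale * (x > 0.0f ? x : kAlpha * (std::exp(x) - 1.0f));
}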

/**
*@brief Computes rectified linear gradients for a ReLU operation . \n
*@par Inputs:
* Two inputs, including:
*@li gradients: A Tensor. Must be one of the following types: float32, double,
* int32, int8, int16, int64, uint16, float16, uint32, uint64
*@li features: A Tensor. Must be one of the following types: float32, double,
* int32, int8, int16, int64, uint16, float16, uint32, uint64
*@par Outputs:
*backprops: A Tensor. Must have the same type as "gradients" . \n
*@attention Constraints:
* The corresponding Relu operator needs to be called before using this operator on the network . \n
*@see Relu
*@par Third-party framework compatibility
* Compatible with TensorFlow operator ReluGrad.
*/
REG_OP(ReluGrad)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(features, TensorType::RealNumberType())
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(ReluGrad)

/**
*@brief Computes rectified linear gradients for a ReLU operation . \n
*@par Inputs:
* Two inputs, including:
*@li gradients: A Tensor. Must be one of the following types: float32, double, int32, int8, int16, int64, uint16, float16, uint32, uint64
*@li mask: A Tensor of type uint8
*@par Outputs:
*backprops: A Tensor. Must have the same type as "gradients" . \n
*@attention Constraints:
* The corresponding Relu operator needs to be called before using this operator on the network . \n
*@see Relu
*@par Third-party framework compatibility
* Compatible with TensorFlow operator ReluGradV2.
*/
REG_OP(ReluGradV2)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(mask, TensorType({DT_UINT8}))
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(ReluGradV2)

/**
*@brief Computes rectified linear: "max(x, 0)".
*
*@attention Constraints:
* The last dimension must be divisible by 8.
* The second output "mask" is "1" (for y >= 0) or "0" (for y < 0).
*
*@par Inputs:
* x: A tensor. Must be one of the following types: float32, float64, int32, uint8,
* int16, int8, int64, uint16, float16, qint8.
*
*@par Outputs:
*@li y: A tensor. Has the same type as "x".
*@li mask: A tensor of type uint8.
*
*@par Third-party framework compatibility
* Incompatible with TensorFlow or Caffe.
*
*/
REG_OP(ReluV2)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, DT_INT32,
                          DT_INT16, DT_INT64, DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, DT_INT32,
                           DT_INT16, DT_INT64, DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(mask, TensorType({DT_UINT8}))
    .OP_END_FACTORY_REG(ReluV2)

/**
*@brief Performs parametric ReLU . \n
*@par Inputs:
* Two inputs, including:
*@li x: A multi-dimensional Tensor of type float16 or float32.
*@li weight: A Scalar or 1D Tensor of type float16 or float32, specifying the weight, the initial value of "a". The number of dimensions must be the same as the number of channels . \n
*@par Outputs:
*y: An activated Tensor. Has the same dimensions as "x" . \n
*@par Third-party framework compatibility
* Compatible with PyTorch and Caffe operator PReLU.
*/
REG_OP(PRelu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(weight, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(PRelu)

/**
*@brief Performs the backpropagation of PRelu for training scenarios . \n
*@par Inputs:
* Three inputs, including:
*@li grads: Input gradient. Multi-dimensional Tensors are supported. The data type can be float16 or float32.
*@li features: A multi-dimensional Tensor of type float16 or float32.
*@li weights: A Scalar or 1D Tensor of type float16 or float32, specifying the weight. The number of dimensions must be the same as the number of channels . \n
*@par Outputs:
*@li dx: Reverse gradient of "features". Has the same dimensions and type as "features".
*@li da: Reverse gradient of "weights". Has the same dimensions and type as "features" . \n
*@par Third-party framework compatibility
* Compatible with PyTorch operator PReluGrad.
*/
REG_OP(PReluGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(weights, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(dx, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(da, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(PReluGrad)

/**
*@brief Activation function fused from sigmoid and ReLU, with soft saturation
* on the left and no saturation on the right . \n
*@par Inputs:
*x: A float16, float32 or double, for the input data type . \n
*@par Attributes:
*alpha: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
*@par Outputs:
*y: A float16, float32 or double, for the normalized result . \n
*@attention Constraints:
*@li The input is of type float16 or float32 . \n
*@par Multiple batches supported or not
*Supported
*@par Third-party framework compatibility
*@li Compatible with Tensorflow's Elu operator
*@li Compatible with Caffe's ELULayer operator
*
*@since V100R001C33
*/
REG_OP(Elu)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(Elu)

/**
*@brief Continuously Differentiable Exponential Linear Units:
* Perform the linear unit element-wise on the input tensor X using formula:
* max(0, x) + min(0, alpha * (exp(x/alpha) - 1)). \n
*@par Inputs:
*x: A float16, float32 or double, for the input data type . \n
*@par Attributes:
*alpha: A float32. Defines at which negative value the CELU saturates. Defaults to "1.0" . \n
*@par Outputs:
*y: A float16, float32 or double, for the normalized result . \n
*@attention Constraints:
*@li The input is of type float16 or float32 . \n
*@par Multiple batches supported or not
*Supported
*@par Third-party framework compatibility
*@li Compatible with ONNX's Celu operator
*/
REG_OP(Celu)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(Celu)
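
// A minimal scalar reference sketch of Celu (illustrative only), following
// the formula above: max(0, x) + min(0, alpha * (exp(x / alpha) - 1)).
// Requires <cmath>; alpha must be non-zero.
inline float CeluRef(float x, float alpha) {
  const float neg = alpha * (std::exp(x / alpha) - 1.0f);
  return (x > 0.0f ? x : 0.0f) + (neg < 0.0f ? neg : 0.0f);
}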

/**
*@brief Computes gradients for the exponential linear (Elu) operation.
*
*@par Inputs:
*@li grads: A tensor. Must be one of the following types: float16, float32, float64.
* The backpropagated gradients to the corresponding Elu operation.
*@li activations: A tensor. Has the same type as "grads".
* The outputs of the corresponding Elu operation.
*
*@par Outputs:
* y: A tensor. Has the same type as "grads".
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator EluGrad.
*
*/
REG_OP(EluGrad)
    .INPUT(grads, TensorType::FloatingDataType())
    .INPUT(activations, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(EluGrad)

/**
*@brief Computes the output as x if x > 0 and negative_slope * x if x <= 0 . \n
*@par Inputs:
* One input:
* x: A Tensor. Must be one of the following types: float32, float16, double.
*
*@par Attributes:
*negative_slope: A float32. Defaults to "0.0".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the Caffe operator ReLU.
*/
REG_OP(LeakyRelu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE}))
    .ATTR(negative_slope, Float, 0.0)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LeakyRelu)

/**
*@brief Computes the output as gradients if features > 0 and negative_slope * gradients if features <= 0 . \n
*@par Inputs:
* Two inputs, including:
* @li gradients: A Tensor. Must be one of the following types: float16, float32, double.
* @li features: A Tensor. Has the same type as "gradients" . \n
*@par Attributes:
*negative_slope: A float32. Defaults to "0.0" . \n
*@par Outputs:
*backprops: A Tensor. Has the same type as "gradients" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator LeakyReluGrad.
*/
REG_OP(LeakyReluGrad)
    .INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(negative_slope, Float, 0.0)
    .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LeakyReluGrad)

/**
*@brief Computes the gradients of the threshold function for each element of the input Tensor . \n
*@par Inputs:
* @li gradients: A Tensor of input gradients. Supported types: float16, int32.
* @li features: A Tensor of input features. Supported types: float16, int32 . \n
*@par Attributes:
*threshold: A float32 scalar value to threshold at . \n
*@par Outputs:
*backprops: A Tensor of output backprops, with the same shape and type as the inputs . \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ThresholdGradV2D)
    .INPUT(gradients, TensorType({DT_INT32, DT_FLOAT16}))
    .INPUT(features, TensorType({DT_INT32, DT_FLOAT16}))
    .OUTPUT(backprops, TensorType({DT_INT32, DT_FLOAT16}))
    .REQUIRED_ATTR(threshold, Float)
    .OP_END_FACTORY_REG(ThresholdGradV2D)

/**
*@brief Thresholds each element of the input Tensor: y = (x > threshold) ? x : value . \n
*@par Inputs:
*x: A Tensor of a real-number dtype . \n
*@par Attributes:
*@li threshold: A float32 scalar value to threshold at.
*@li value: A float32 scalar value to replace with . \n
*@par Outputs:
*y: A Tensor of output, with the same shape and type as the input . \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ThresholdV2D)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(threshold, Float)
    .REQUIRED_ATTR(value, Float)
    .OP_END_FACTORY_REG(ThresholdV2D)

/**
*@brief Computes the Mish activation of "x" element-wise,
* y = x * tanh(softplus(x)) . \n
*@par Inputs:
*One input:
*x: A Tensor. Must be one of the following types: float16, float32 . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Mish.
*/
REG_OP(Mish)
    .INPUT(x, TensorType({ DT_FLOAT,DT_FLOAT16 }))
    .OUTPUT(y, TensorType({ DT_FLOAT,DT_FLOAT16 }))
    .OP_END_FACTORY_REG(Mish)
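
// A minimal scalar reference sketch of Mish (illustrative only):
// y = x * tanh(softplus(x)), with softplus in its stable form.
// Requires <cmath>.
inline float MishRef(float x) {
  const float sp = (x > 0.0f ? x : 0.0f) + std::log1p(std::exp(-std::fabs(x)));
  return x * std::tanh(sp);
}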

/**
* @brief pytorch hardtanh_backward operator.
*
* @par Inputs:
* 2 inputs, including:
* @li result, output tensor of the forward hardtanh,
* datatype: float16/float32, format:ND/5HD.
* @li grad, gradient tensor from the upstream layer,
* datatype:float16/float32, format:ND/5HD. \n
* @par Attributes:
* 2 attributes, including:
* @li min_val, minimum value of the linear region range, datatype:float.
* @li max_val, maximum value of the linear region range, datatype:float. \n
* @par Outputs:
* 1 output, including:
* @li y, hardtanh_backward output tensor, datatype and format is same as
* input result. \n
* @attention Constraints:
* This operator only supports dataType: float16/float32, format: ND/5HD. \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator HardtanhGrad.
*/
REG_OP(HardtanhGrad)
    .INPUT(result, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "First operand." */
    .INPUT(grad, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Second operand." */
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Result, has same element type as two inputs" */
    .ATTR(min_val, Float, -1.0)
    .ATTR(max_val, Float, 1.0)
    .OP_END_FACTORY_REG(HardtanhGrad)

/**
* @brief Calculates the softplus activation function with attributes of beta and threshold. \n
* @par Inputs:
* One input, including:
* @li x: A mutable Tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li beta: An optional float. Defaults to "1.0" \n
* @li threshold: An optional float. Defaults to "20.0" \n
* @par Outputs:
* @li y: A mutable Tensor. Has the same type as "x" \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator Softplus.
*/
REG_OP(SoftplusV2)
    .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .ATTR(beta, Float, 1.0)
    .ATTR(threshold, Float, 20.0)
    .OP_END_FACTORY_REG(SoftplusV2)
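
// A minimal scalar reference sketch of SoftplusV2 (illustrative only),
// assuming PyTorch softplus semantics: y = (1/beta) * log1p(exp(beta * x)),
// reverting to the identity once beta * x exceeds "threshold" for numerical
// stability. Requires <cmath>.
inline float SoftplusV2Ref(float x, float beta, float threshold) {
  const float bx = beta * x;
  return bx > threshold ? x : std::log1p(std::exp(bx)) / beta;
}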

/**
* @brief Computes the gradients for the "softplus_v2" operation. \n
* @par Inputs:
* Two inputs, including:
* @li input_gradients: A mutable Tensor. Must be one of the following types:
* float16, float32.
* @li input_features: A mutable Tensor of the same type as "input_gradients" \n
* @par Attributes:
* @li beta: An optional float. Defaults to "1.0" \n
* @li threshold: An optional float. Defaults to "20.0" \n
* @par Outputs:
* @li output_backprops: A mutable Tensor. Has the same type as "input_gradients" \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator SoftplusGrad.
*/
REG_OP(SoftplusV2Grad)
    .INPUT(input_gradients, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(input_features, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .OUTPUT(output_backprops, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .ATTR(beta, Float, 1.0)
    .ATTR(threshold, Float, 20.0)
    .OP_END_FACTORY_REG(SoftplusV2Grad)

/**
* @brief ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor)
* where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise.
*
* @par Inputs:
* One input, including:
* @li x: A Tensor. Must be one of the following types: float32, float16
*
* @par Attributes:
* @li alpha: An optional float. Defaults to "1.0"
*
* @par Outputs:
* One output, including:
* @li y: A Tensor of the same type as x
*
*/
REG_OP(ThresholdedRelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(ThresholdedRelu)

/**
* @brief Calculate the hard shrinkage function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor with the same dtype and shape as "input_x". \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator Hardshrink. \n
*/
REG_OP(HardShrink)
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(HardShrink)
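
// A minimal scalar reference sketch of HardShrink (illustrative only):
// values inside [-lambd, lambd] are zeroed, everything else passes through.
inline float HardShrinkRef(float x, float lambd) {
  return (x > lambd || x < -lambd) ? x : 0.0f;
}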

/**
*@brief Calculate the hard shrink grad function. \n
*
* Computes the gradient for the HardShrink: the gradient passes through where
* x > lambda or x < -lambda; otherwise it is 0 \n
*
*@par Inputs:
*Two inputs, including:
* @li gradients: A tensor. Must be one of the following types:
* float16, float32. \n
* @li features: A tensor. Must be one of the following types:
* float16, float32. \n
*
*@par Outputs:
*backprops: A Tensor with the same type and shape as "features". \n
*
*@par Attributes:
*@li lambda: An optional float. Defaults to 0.5. \n
*
*@par Third-party framework compatibility
*Compatible with the Pytorch operator Hardshrink_backward. \n
*/
REG_OP(HardShrinkGrad)
    .INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambda, Float, 0.5)
    .OP_END_FACTORY_REG(HardShrinkGrad)

/**
* @brief Calculate the hard sigmoid function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32, int32. \n
* @par Attributes:
* @li alpha: An optional float. Defaults to 0.16666666. \n
* @li beta: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor of type float16 or float32, with the same shape as "input_x". \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator Hardsigmoid. \n
*/
REG_OP(HardSigmoid)
    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(alpha, Float, 0.16666666)
    .ATTR(beta, Float, 0.5)
    .OP_END_FACTORY_REG(HardSigmoid)
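
// A minimal scalar reference sketch of HardSigmoid (illustrative only): the
// piecewise-linear sigmoid approximation y = clamp(alpha * x + beta, 0, 1).
inline float HardSigmoidRef(float x, float alpha, float beta) {
  const float y = alpha * x + beta;
  return y < 0.0f ? 0.0f : (y > 1.0f ? 1.0f : y);
}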

/**
* @brief Calculate the soft shrinkage function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor with the same dtype and shape as "input_x". \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator Softshrink. \n
*/
REG_OP(SoftShrink)
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(SoftShrink)

/**
* @brief Calculate the gradients of the function "soft_shrink". \n
* @par Inputs:
* Two inputs, including:
* @li input_grad: A tensor. Must be one of the following types:
* float16, float32. \n
* @li input_x: A tensor of the same dtype as "input_grad". \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor of the same dtype and shape as "input_grad". \n
* @par Third-party framework compatibility
* Compatible with the Pytorch operator SoftShrinkGrad. \n
*/
REG_OP(SoftShrinkGrad)
    .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(SoftShrinkGrad)

/**
*@brief Calculate -ln(1+e^(-x)). \n
*@par Inputs:
*One input, including:
* @li x: A tensor. Must be one of the following types:
* float16, float32. \n
*@par Outputs:
*One output, including:
* @li y: A tensor with the same type and shape as "x". \n
*@par Third-party framework compatibility
*Compatible with the Pytorch operator LogSigmoid. \n
*/
REG_OP(LogSigmoid)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT})) /* "input:x" */
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT})) /* "output:y" */
    .OP_END_FACTORY_REG(LogSigmoid)
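
// A minimal scalar reference sketch of LogSigmoid (illustrative only):
// -ln(1 + e^(-x)) computed stably as min(x, 0) - log1p(exp(-|x|)).
// Requires <cmath>.
inline float LogSigmoidRef(float x) {
  return (x < 0.0f ? x : 0.0f) - std::log1p(std::exp(-std::fabs(x)));
}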

/**
*@brief Calculate the gradients of the function "hard_sigmoid" \n
*@par Inputs:
*Two inputs, including:
* @li grads: A tensor. Must be one of the following types:
* float16, float32. \n
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
*@par Attributes:
* @li alpha: An optional float. Defaults to 0.16666666. \n
* @li beta: An optional float. Defaults to 0.5. \n
*@par Outputs:
*One output, including:
* @li y: A tensor with the same type and shape as "input_x". \n
*@par Third-party framework compatibility
*Compatible with the Pytorch operator HardSigmoidGrad. \n
*/
REG_OP(HardSigmoidGrad)
    .INPUT(grads, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(alpha, Float, 0.16666666)
    .ATTR(beta, Float, 0.5)
    .OP_END_FACTORY_REG(HardSigmoidGrad)

}  // namespace ge

#endif  // OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_

The Graph Engine (GE) is a submodule of MindSpore implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and finally produces a graph that can run efficiently on the target hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to make full use of its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.