
nonlinear_fuc_ops.h 26 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*!
 * \file nonlinear_fuc_ops.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_
#define OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_
#include "graph/operator_reg.h"
namespace ge {
/**
*@brief Computes the gelu of "x" . \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator Gelu
*/
REG_OP(Gelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(Gelu)
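/* For reference, GELU is commonly defined as y = x * Phi(x) = 0.5 * x * (1.0 + erf(x / sqrt(2.0))),
 * with a tanh-based approximation also in wide use. This header only declares the operator
 * signature, so which variant the kernel implements is not specified here. */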
/**
*@brief Computes the gradient for the gelu of "x" . \n
*@par Inputs:
*Three inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32
* @li x: A Tensor of the same type as "dy".
* @li y: A Tensor of the same type as "dy" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "dy".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator GeluGrad
*/
REG_OP(GeluGrad)
    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(GeluGrad)
/**
*@brief Computes the fast_gelu of "x" . \n
*@par Inputs:
*One input, including:
* @li x: A Tensor. Must be one of the following types: float16, float32
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FastGelu
*/
REG_OP(FastGelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(FastGelu)
/**
*@brief Computes the gradient for the fast_gelu of "x" . \n
*@par Inputs:
*Two inputs, including:
* @li dy: A Tensor. Must be one of the following types: float16, float32
* @li x: A Tensor of the same type as "dy" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "dy".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator FastGeluGrad
*/
REG_OP(FastGeluGrad)
    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(z, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(FastGeluGrad)
/**
*@brief Computes the gradient for the tanh of "x" . \n
*@par Inputs:
*Two inputs, including:
* @li y: A Tensor. Must be one of the following types: float16, float32,
* double, complex64, complex128.
* @li dy: A Tensor of the same type as "y" . \n
*@par Outputs:
*z: A Tensor. Has the same type as "y".
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator TanhGrad.
*/
REG_OP(TanhGrad)
    .INPUT(y, TensorType::UnaryDataType())
    .INPUT(dy, TensorType::UnaryDataType())
    .OUTPUT(z, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(TanhGrad)
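/* For reference: with y = tanh(x), a TanhGrad-style backward rule is z = dy * (1 - y * y);
 * e.g. y = 0 gives z = dy, and y -> +/-1 drives z toward 0. */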
/**
*@brief Computes hyperbolic tangent of "x" element-wise . \n
*@par Inputs:
*One input:
*x: A Tensor. Must be one of the following types: float16, float32, complex64, complex128, double . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Tanh.
*/
REG_OP(Tanh)
    .INPUT(x, TensorType::UnaryDataType())
    .OUTPUT(y, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(Tanh)
/**
* @brief Computes rectified linear: "max(x, 0)".
*
* @par Inputs:
* x: A tensor. Must be one of the following types: float32, float64, int32, uint8,
* int16, int8, int64, uint16, float16, qint8.
*
* @par Outputs:
* y: A tensor. Has the same type as "x".
*
* @par Third-party framework compatibility
* @li Compatible with the TensorFlow operator Relu.
* @li Compatible with the Caffe operator ReLULayer.
*
*/
REG_OP(Relu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
                          DT_INT8, DT_INT32, DT_INT16, DT_INT64,
                          DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE,
                           DT_INT8, DT_INT32, DT_INT16, DT_INT64,
                           DT_UINT8, DT_UINT16, DT_QINT8}))
    .OP_END_FACTORY_REG(Relu)
/**
* @brief Computes rectified linear 6.
* activations = min(max(x, 0), 6) . \n
* @par Inputs:
* x: A Tensor of type RealNumberType . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6.
*/
REG_OP(Relu6)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(Relu6)
/**
* @brief Computes rectified linear 6*scale.
* activations = min(max(x, 0), 6*scale) . \n
* @par Inputs:
* x: A Tensor of type RealNumberType . \n
* @par Attributes:
* scale: An optional float. Defaults to "1.0" . \n
* @par Outputs:
* y: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6.
*
*@par Restrictions:
*Warning: THIS FUNCTION IS DEPRECATED. Please use Relu6 instead.
*/
REG_OP(Relu6D)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .ATTR(scale, Float, 1.0)
    .OP_END_FACTORY_REG(Relu6D)
/**
* @brief Computes rectified linear 6 gradients for a Relu6 operation.
* backprops = gradients * (features > 0) * (features < 6) . \n
* @par Inputs:
* @li features: A Tensor of type RealNumberType.
* @li gradients: A Tensor of type RealNumberType . \n
* @par Outputs:
* backprops: A Tensor of type RealNumberType . \n
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Relu6Grad.
*/
REG_OP(Relu6Grad)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(features, TensorType::RealNumberType())
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(Relu6Grad)
/**
* @brief Computes sigmoid of "x" element-wise . \n
* @par Inputs:
* x: A Tensor of type complex64, complex128, float16, float32 or double . \n
* @par Outputs:
* y: A Tensor. Has the same type as "x" . \n
* @see Relu()
* @par Third-party framework compatibility
* Compatible with the TensorFlow operator Sigmoid.
*/
REG_OP(Sigmoid)
    .INPUT(x, TensorType::UnaryDataType())
    .OUTPUT(y, TensorType::UnaryDataType())
    .OP_END_FACTORY_REG(Sigmoid)
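/* For reference, sigmoid is y = 1 / (1 + exp(-x)), so y(0) = 0.5 and y saturates at 0 and 1;
 * its derivative y * (1 - y) is exactly the (y - y*y) factor used by SigmoidGrad below. */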
/**
* @brief Computes z = (y - y*y)*dy . \n
* @par Inputs:
* @li y: The input is Tensor, dtype is UnaryDataType.
* @li dy: The input is Tensor, dtype is UnaryDataType . \n
* @par Outputs:
* z: The output Tensor, dtype is UnaryDataType.
*/
REG_OP(SigmoidGrad)
    .INPUT(y, TensorType(UnaryDataType))
    .INPUT(dy, TensorType(UnaryDataType))
    .OUTPUT(z, TensorType(UnaryDataType))
    .OP_END_FACTORY_REG(SigmoidGrad)
/**
*@brief Computes the binomial normal log likelihood (BNLL) output:
*if x>0, x+log(1+exp(-x)); otherwise log(1+exp(x)) . \n
*@par Inputs:
*x: A Tensor of type double, float16 or float32 . \n
*@par Outputs:
*y: A tensor. Has the same type and format as input "x" . \n
*@par Third-party framework compatibility
* Compatible with the Caffe operator BNLL.
*/
REG_OP(BNLL)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(BNLL)
/**
*@brief Computes softplus: log(exp(x) + 1) . \n
*@par Inputs:
* One input:
*x: A Tensor of type float16 or float32. Up to 8D . \n
*@par Outputs:
*y: The activations tensor. Has the same type and format as input "x"
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Softplus.
*/
REG_OP(Softplus)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Softplus)
/**
*@brief Computes softplus gradients for a softplus operation . \n
*@par Inputs:
*Two inputs:
* @li gradients: An NC1HWC0 or ND Tensor of type float16 or float32.
* @li features: An NC1HWC0 or ND Tensor of type float16 or float32.
*@par Outputs:
*backprops: A Tensor. Has the same type and format as input "gradients" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator SoftplusGrad.
*/
REG_OP(SoftplusGrad)
    .INPUT(gradients, TensorType::FloatingDataType())
    .INPUT(features, TensorType::FloatingDataType())
    .OUTPUT(backprops, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(SoftplusGrad)
/**
*@brief Computes softsign: x/(abs(x) + 1) . \n
*@par Inputs:
* One input:
*x: A Tensor of type float16 or float32. Up to 8D . \n
*@par Outputs:
*y: The activations tensor. Has the same type and format as "x"
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Softsign.
*/
REG_OP(Softsign)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(Softsign)
/**
*@brief Computes the scaled exponential linear unit (SELU):
* scale * x if x > 0, scale * alpha * (exp(x) - 1) otherwise . \n
*@par Inputs:
* One input:
*x: A Tensor. Must be one of the following types: float16, float, double,
* int32, int8. format:ND, NC1HWC0 . \n
*@par Outputs:
*y: A Tensor. Has the same type and format as input "x". format:ND, NC1HWC0 . \n
*@see Region()
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator Selu.
*/
REG_OP(Selu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                          DT_INT8, DT_INT32}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE,
                           DT_INT8, DT_INT32}))
    .OP_END_FACTORY_REG(Selu)
/**
*@brief Computes rectified linear gradients for a ReLU operation . \n
*@par Inputs:
* Two inputs, including:
*@li gradients: A Tensor. Must be one of the following types: float32, double,
* int32, int8, int16, int64, uint16, float16, uint32, uint64
*@li features: A Tensor. Must be one of the following types: float32, double,
* int32, int8, int16, int64, uint16, float16, uint32, uint64
*@par Outputs:
*backprops: A Tensor. Must have the same type as "gradients" . \n
*@attention Constraints:
* The corresponding Relu operator needs to be called before using this operator on the network . \n
*@see Relu
*@par Third-party framework compatibility
* Compatible with TensorFlow operator ReluGrad.
*/
REG_OP(ReluGrad)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(features, TensorType::RealNumberType())
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(ReluGrad)
/**
*@brief Computes rectified linear gradients for a ReLU operation . \n
*@par Inputs:
* Two inputs, including:
*@li gradients: A Tensor. Must be one of the following types: float32, double, int32, int8, int16, int64, uint16, float16, uint32, uint64
*@li mask: A Tensor of type uint8
*@par Outputs:
*backprops: A Tensor. Must have the same type as "gradients" . \n
*@attention Constraints:
* The corresponding Relu operator needs to be called before using this operator on the network . \n
*@see Relu
*@par Third-party framework compatibility
* Compatible with TensorFlow operator ReluGradV2.
*/
REG_OP(ReluGradV2)
    .INPUT(gradients, TensorType::RealNumberType())
    .INPUT(mask, TensorType({DT_UINT8}))
    .OUTPUT(backprops, TensorType::RealNumberType())
    .OP_END_FACTORY_REG(ReluGradV2)
/**
*@brief Computes rectified linear: "max(x, 0)".
*
*@attention Constraints:
* The last dimension must be divisible by 8.
* The second output "mask" is "1" (for y >= 0) or "0" (for y < 0).
*
*@par Inputs:
* x: A tensor. Must be one of the following types: float32, float64, int32, uint8,
* int16, int8, int64, uint16, float16, qint8.
*
*@par Outputs:
*@li y: A tensor. Has the same type as "x".
*@li mask: A tensor of type uint8.
*
*@par Third-party framework compatibility
* Incompatible with TensorFlow or Caffe.
*
*/
REG_OP(ReluV2)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, DT_INT32, DT_INT16, DT_INT64, DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE, DT_INT8, DT_INT32, DT_INT16, DT_INT64, DT_UINT8, DT_UINT16, DT_QINT8}))
    .OUTPUT(mask, TensorType({DT_UINT8}))
    .OP_END_FACTORY_REG(ReluV2)
/**
*@brief Performs parametric ReLU . \n
*@par Inputs:
* Two inputs, including:
*@li x: A multi-dimensional Tensor of type float16 or float32.
*@li weight: A Scalar or 1D Tensor of type float16 or float32, specifying the weight, the initial value of "a". The number of dimensions must be the same as the number of channels . \n
*@par Outputs:
*y: An activated Tensor. Has the same dimensions as "x" . \n
*@par Third-party framework compatibility
* Compatible with PyTorch and Caffe operator PReLU.
*/
REG_OP(PRelu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .INPUT(weight, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(PRelu)
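/* For reference, PReLU applies y = x for x > 0 and y = weight * x otherwise, with "weight"
 * broadcast per channel; a single scalar weight reduces it to LeakyRelu with that slope. */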
/**
*@brief Performs the backpropagation of PRelu for training scenarios . \n
*@par Inputs:
* Three inputs, including:
*@li grads: Input gradient. Multi-dimensional Tensors are supported. The data type can be float16 or float32.
*@li features: A multi-dimensional Tensor of type float16 or float32.
*@li weights: A Scalar or 1D Tensor of type float16 or float32, specifying the weight. The number of dimensions must be the same as the number of channels . \n
*@par Outputs:
*@li dx: Reverse gradient of "features". Has the same dimensions and type as "features".
*@li da: Reverse gradient of "weights". Has the same dimensions and type as "features" . \n
*@par Third-party framework compatibility
* Compatible with PyTorch operator PReluGrad.
*/
REG_OP(PReluGrad)
    .INPUT(grads, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(weights, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(dx, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(da, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OP_END_FACTORY_REG(PReluGrad)
/**
*@brief Activation function fused from sigmoid and ReLU, with soft saturation
* on the left and no saturation on the right . \n
*@par Inputs:
*x: A Tensor of type float16, float32 or double, holding the input data . \n
*@par Attributes:
*alpha: A float32. Defines at which negative value the ELU saturates. Defaults to "1.0" . \n
*@par Outputs:
*y: A Tensor of type float16, float32 or double, holding the result . \n
*@attention Constraints:
*@li The input is of type float16 or float32 . \n
*@par Multiple batches supported or not
*Supported
*@par Third-party framework compatibility
*@li Compatible with TensorFlow's Elu operator
*@li Compatible with Caffe's ELULayer operator
*
*@since V100R001C33
*/
REG_OP(Elu)
    .INPUT(x, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(Elu)
/**
*@brief Computes gradients for the exponential linear (Elu) operation.
*
*@par Inputs:
*@li grads: A tensor. Must be one of the following types: float16, float32, float64.
* The backpropagated gradients to the corresponding Elu operation.
*@li activations: A tensor. Has the same type as "grads".
* The outputs of the corresponding Elu operation.
*
*@par Outputs:
* y: A tensor. Has the same type as "grads".
*
*@par Third-party framework compatibility
*Compatible with the TensorFlow operator EluGrad.
*
*/
REG_OP(EluGrad)
    .INPUT(grads, TensorType::FloatingDataType())
    .INPUT(activations, TensorType::FloatingDataType())
    .OUTPUT(y, TensorType::FloatingDataType())
    .OP_END_FACTORY_REG(EluGrad)
/**
*@brief Computes the output as x if x > 0 and negative_slope * x if x <= 0 . \n
*@par Inputs:
* One input:
* x: A Tensor. Must be one of the following types: float32, float16, double.
*
*@par Attributes:
*negative_slope: A float32. Defaults to "0.0".
*
*@par Outputs:
*y: A Tensor. Has the same type as "x".
*@par Third-party framework compatibility
* Compatible with the Caffe operator ReLU.
*/
REG_OP(LeakyRelu)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE}))
    .ATTR(negative_slope, Float, 0.0)
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LeakyRelu)
/**
*@brief Computes the output as gradients if features > 0 and negative_slope * gradients if features <= 0 . \n
*@par Inputs:
* Two inputs, including:
* @li gradients: A Tensor. Must be one of the following types: float16, float32, double.
* @li features: A Tensor. Has the same type as "gradients" . \n
*@par Attributes:
*negative_slope: A float32. Defaults to "0.0" . \n
*@par Outputs:
*backprops: A Tensor. Has the same type as "gradients" . \n
*@par Third-party framework compatibility
* Compatible with the TensorFlow operator LeakyReluGrad.
*/
REG_OP(LeakyReluGrad)
    .INPUT(gradients, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(features, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(negative_slope, Float, 0.0)
    .OUTPUT(backprops, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OP_END_FACTORY_REG(LeakyReluGrad)
/**
*@brief Computes the gradient of the threshold operation for each element of the input Tensor . \n
*@par Inputs:
* @li gradients: A Tensor holding the input gradients. Supported types: float16, int32.
* @li features: A Tensor holding the input features. Supported types: float16, int32 . \n
*@par Attributes:
*threshold: A float32 scalar value to threshold at . \n
*@par Outputs:
*backprops: A Tensor holding the output backprops; has the same shape and type as the inputs . \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ThresholdGradV2D)
    .INPUT(gradients, TensorType({DT_INT32, DT_FLOAT16}))
    .INPUT(features, TensorType({DT_INT32, DT_FLOAT16}))
    .OUTPUT(backprops, TensorType({DT_INT32, DT_FLOAT16}))
    .REQUIRED_ATTR(threshold, Float)
    .OP_END_FACTORY_REG(ThresholdGradV2D)
/**
*@brief Thresholds each element of the input Tensor: y = (x > threshold) ? x : value . \n
*@par Inputs:
*x: A Tensor with a real-number dtype . \n
*@par Attributes:
*@li threshold: A float32 scalar value to threshold at.
*@li value: A float32 scalar value to replace with . \n
*@par Outputs:
*y: A Tensor with the same shape and type as the input . \n
*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(ThresholdV2D)
    .INPUT(x, TensorType::RealNumberType())
    .OUTPUT(y, TensorType::RealNumberType())
    .REQUIRED_ATTR(threshold, Float)
    .REQUIRED_ATTR(value, Float)
    .OP_END_FACTORY_REG(ThresholdV2D)
/**
*@brief Computes the Mish activation of "x" element-wise: x * tanh(softplus(x)) . \n
*@par Inputs:
*One input:
*x: A Tensor. Must be one of the following types: float16, float32 . \n
*@par Outputs:
*y: A Tensor. Has the same type as "x" . \n
*@par Third-party framework compatibility
* Compatible with TensorFlow operator Mish.
*/
REG_OP(Mish)
    .INPUT(x, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OUTPUT(y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .OP_END_FACTORY_REG(Mish)
/**
* @brief PyTorch hardtanh_backward operator.
*
* @par Inputs:
* 2 inputs, including:
* @li result, minimum tensor of the linear region range,
* datatype: float16/float32, format: ND/5HD.
* @li grad, maximum tensor of the linear region range,
* datatype: float16/float32, format: ND/5HD. \n
* @par Attributes:
* 2 attributes, including:
* @li min_val, minimum value of the linear region range, datatype: float.
* @li max_val, maximum value of the linear region range, datatype: float. \n
* @par Outputs:
* 1 output, including:
* @li y, hardtanh_backward output tensor, datatype and format are the same as
* input result. \n
* @attention Constraints:
* This operator only supports dataType: float16/float32, format: ND/5HD. \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator HardtanhGrad.
*/
REG_OP(HardtanhGrad)
    .INPUT(result, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "First operand." */
    .INPUT(grad, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Second operand." */
    .OUTPUT(y, TensorType({ DT_FLOAT16, DT_FLOAT })) /* "Result, has same element type as two inputs" */
    .ATTR(min_val, Float, -1.0)
    .ATTR(max_val, Float, 1.0)
    .OP_END_FACTORY_REG(HardtanhGrad)
/**
* @brief Calculates the softplus activation function with attributes beta and threshold. \n
* @par Inputs:
* One input, including:
* @li x: A mutable Tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li beta: An optional float. Defaults to "1.0". \n
* @li threshold: An optional float. Defaults to "20.0". \n
* @par Outputs:
* @li y: A mutable Tensor. Has the same type as "x". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator Softplus.
*/
REG_OP(SoftplusV2)
    .INPUT(x, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .OUTPUT(y, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .ATTR(beta, Float, 1.0)
    .ATTR(threshold, Float, 20.0)
    .OP_END_FACTORY_REG(SoftplusV2)
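/* For reference, following the PyTorch Softplus definition this op is declared compatible with:
 * y = (1 / beta) * log(1 + exp(beta * x)), reverting to the linear y = x once beta * x exceeds
 * "threshold" for numerical stability. With the default beta = 1.0, y(0) = log(2) ~= 0.693. */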
/**
* @brief Computes the gradients for the "softplus_v2" operation. \n
* @par Inputs:
* Two inputs, including:
* @li input_gradients: A mutable Tensor. Must be one of the following types:
* float16, float32.
* @li input_features: A mutable Tensor of the same type as "input_gradients". \n
* @par Attributes:
* @li beta: An optional float. Defaults to "1.0". \n
* @li threshold: An optional float. Defaults to "20.0". \n
* @par Outputs:
* @li output_backprops: A mutable Tensor. Has the same type as "input_gradients". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator SoftplusGrad.
*/
REG_OP(SoftplusV2Grad)
    .INPUT(input_gradients, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .INPUT(input_features, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .OUTPUT(output_backprops, TensorType({ DT_FLOAT, DT_FLOAT16 }))
    .ATTR(beta, Float, 1.0)
    .ATTR(threshold, Float, 20.0)
    .OP_END_FACTORY_REG(SoftplusV2Grad)
/**
* @brief ThresholdedRelu takes one input data (Tensor) and produces one output data (Tensor)
* where the rectified linear function, y = x for x > alpha, y = 0 otherwise, is applied to the tensor elementwise.
*
* @par Inputs:
* One input, including:
* @li x: A Tensor. Must be one of the following types: float32, float16
*
* @par Attributes:
* @li alpha: An optional float. Defaults to 1.0.
*
* @par Outputs:
* One output, including:
* @li y: A Tensor of the same type as "x"
*
*/
REG_OP(ThresholdedRelu)
    .INPUT(x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(alpha, Float, 1.0)
    .OP_END_FACTORY_REG(ThresholdedRelu)
/**
* @brief Calculates the hard shrinkage function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor with the same dtype and shape as "input_x". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator Hardshrink. \n
*/
REG_OP(HardShrink)
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(HardShrink)
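/* For reference, hard shrinkage keeps values whose magnitude exceeds lambd and zeroes the rest:
 * y = x if |x| > lambd, else 0. With lambd = 0.5, x = [-1.0, -0.3, 0.2, 0.8] maps to
 * y = [-1.0, 0.0, 0.0, 0.8]. */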
/**
* @brief Calculates the hard sigmoid function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32, int32. \n
* @par Attributes:
* @li alpha: An optional float. Defaults to 0.16666666. \n
* @li beta: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor with the same shape as "input_x", of type float16 or float32. \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator Hardsigmoid. \n
*/
REG_OP(HardSigmoid)
    .INPUT(input_x, TensorType({DT_FLOAT, DT_FLOAT16, DT_INT32}))
    .OUTPUT(output_y, TensorType({DT_FLOAT, DT_FLOAT16}))
    .ATTR(alpha, Float, 0.16666666)
    .ATTR(beta, Float, 0.5)
    .OP_END_FACTORY_REG(HardSigmoid)
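/* For reference, with attributes alpha and beta this is the piecewise-linear sigmoid
 * approximation y = max(0, min(1, alpha * x + beta)); the defaults (alpha ~= 1/6, beta = 0.5)
 * give approximately y(-3) = 0, y(0) = 0.5 and y(3) = 1. */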
/**
* @brief Calculates the soft shrinkage function. \n
* @par Inputs:
* One input, including:
* @li input_x: A tensor. Must be one of the following types:
* float16, float32. \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor with the same dtype and shape as "input_x". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator Softshrink. \n
*/
REG_OP(SoftShrink)
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(SoftShrink)
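/* For reference, soft shrinkage shifts values toward zero by lambd:
 * y = x - lambd if x > lambd, y = x + lambd if x < -lambd, else 0. With lambd = 0.5,
 * x = [-1.0, -0.3, 0.2, 0.8] maps to y = [-0.5, 0.0, 0.0, 0.3]. */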
/**
* @brief Computes the gradients for the "soft_shrink" operation. \n
* @par Inputs:
* Two inputs, including:
* @li input_grad: A tensor. Must be one of the following types:
* float16, float32. \n
* @li input_x: A tensor of the same dtype as "input_grad". \n
* @par Attributes:
* @li lambd: An optional float. Defaults to 0.5. \n
* @par Outputs:
* output_y: A Tensor of the same dtype and shape as "input_grad". \n
* @par Third-party framework compatibility
* Compatible with the PyTorch operator SoftShrinkGrad. \n
*/
REG_OP(SoftShrinkGrad)
    .INPUT(input_grad, TensorType({DT_FLOAT16, DT_FLOAT}))
    .INPUT(input_x, TensorType({DT_FLOAT16, DT_FLOAT}))
    .OUTPUT(output_y, TensorType({DT_FLOAT16, DT_FLOAT}))
    .ATTR(lambd, Float, 0.5)
    .OP_END_FACTORY_REG(SoftShrinkGrad)
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_NONLINEAR_FUC_OPS_H_
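
Each REG_OP(...) block above expands into an operator class that graph-construction code can instantiate; by convention the generated class lives in the ge::op namespace and exposes accessors named after the declared ports (set_input_x for .INPUT(x, ...), and so on). The sketch below shows how a Gelu node declared in this header might be wired into a graph. The "all_ops.h" include, the Data placeholder op and the exact accessor names are assumptions based on the usual Ascend Graph IR conventions rather than anything defined in this file.

#include "graph/graph.h"
#include "all_ops.h"  // assumed umbrella header that pulls in the generated ge::op classes

ge::Graph BuildGeluGraph() {
  ge::op::Data x("x");           // graph input placeholder (assumed helper op)
  ge::op::Gelu gelu("gelu_0");   // class generated from REG_OP(Gelu) above
  gelu.set_input_x(x);           // accessor generated from .INPUT(x, ...)

  ge::Graph graph("gelu_graph");
  graph.SetInputs({x}).SetOutputs({gelu});
  return graph;
}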

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE mainly consists of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
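
On the GE API side, a typical client is only a few calls: initialize GE, create a Session, register a Graph (for example one built from the operators declared above), and run it. The following minimal sketch reflects the public GE API as exposed through ge_api.h; the header path, option keys (left empty here) and error handling are simplified assumptions rather than a complete recipe.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // assumed public GE API header

int RunGraphOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs) {
  std::map<std::string, std::string> options;  // global and session options left at defaults
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  ge::Session session(options);
  const uint32_t graph_id = 0;
  session.AddGraph(graph_id, graph);           // hand the graph to GE Core for optimization

  std::vector<ge::Tensor> outputs;
  ge::Status ret = session.RunGraph(graph_id, inputs, outputs);

  ge::GEFinalize();
  return ret == ge::SUCCESS ? 0 : -1;
}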