
deep_md.h 8.4 kB

/**
 * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*!
 * \file deep_md.h
 * \brief
 */
#ifndef OPS_BUILT_IN_OP_PROTO_INC_DEEP_MD_H_
#define OPS_BUILT_IN_OP_PROTO_INC_DEEP_MD_H_

#include "graph/operator_reg.h"

namespace ge {
/**
 * @brief Calculate TabulateFusion. \n
 *
 * @par Inputs:
 * Four inputs, including:
 * @li table: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li table_info: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li em_x: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li em: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Outputs:
 * descriptor: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Attributes:
 * Three attributes, including:
 * @li last_layer_size: int value.
 * @li split_count: int value.
 * @li split_index: int value. \n
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
REG_OP(TabulateFusion)
    .INPUT(table, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(table_info, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(em_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(em, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(descriptor, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(last_layer_size, Int)
    .ATTR(split_count, Int, 1)
    .ATTR(split_index, Int, 0)
    .OP_END_FACTORY_REG(TabulateFusion)

/**
 * @brief Calculate ProdEnvMatA. \n
 *
 * @par Inputs:
 * @li coord: A Tensor. Must be one of the following types: float32, float64.
 * @li type: A Tensor. Must be one of the following types: int32.
 * @li natoms: A Tensor. Must be one of the following types: int32.
 * @li box: A Tensor. Must be one of the following types: float32, float64.
 * @li mesh: A Tensor. Must be one of the following types: int32.
 * @li davg: A Tensor. Must be one of the following types: float32, float64.
 * @li dstd: A Tensor. Must be one of the following types: float32, float64.
 *
 * @par Outputs:
 * @li descrpt: A Tensor. Must be one of the following types: float32, float64.
 * @li descrpt_deriv: A Tensor. Must be one of the following types: float32, float64.
 * @li rij: A Tensor. Must be one of the following types: float32, float64.
 * @li nlist: A Tensor. Must be one of the following types: int32. \n
 *
 * @par Attributes:
 * @li rcut_a: A Float.
 * @li rcut_r: A Float.
 * @li rcut_r_smth: A Float.
 * @li sel_a: A ListInt.
 * @li sel_r: A ListInt.
 * @li split_count: An Int.
 * @li split_index: An Int. \n
 *
 */
REG_OP(ProdEnvMatA)
    .INPUT(coord, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(type, TensorType({DT_INT32}))
    .INPUT(natoms, TensorType({DT_INT32}))
    .INPUT(box, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(mesh, TensorType({DT_INT32}))
    .INPUT(davg, TensorType({DT_FLOAT, DT_DOUBLE}))
    .INPUT(dstd, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(descrpt, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(descrpt_deriv, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(rij, TensorType({DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(nlist, TensorType({DT_INT32}))
    .ATTR(rcut_a, Float, 1.0)
    .ATTR(rcut_r, Float, 1.0)
    .ATTR(rcut_r_smth, Float, 1.0)
    .ATTR(sel_a, ListInt, {})
    .ATTR(sel_r, ListInt, {})
    .ATTR(split_count, Int, 1)
    .ATTR(split_index, Int, 0)
    .OP_END_FACTORY_REG(ProdEnvMatA)

/**
 * @brief Calculate ProdForceSeA. \n
 *
 * @par Inputs:
 * Four inputs, including:
 * @li net_deriv: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li in_deriv: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li nlist: A Tensor. dtype is int32.
 * @li natoms: A Tensor. dtype is int32. \n
 *
 * @par Outputs:
 * atom_force: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Attributes:
 * Four attributes, including:
 * @li n_a_sel: Int value.
 * @li n_r_sel: Int value.
 * @li split_count: Int value.
 * @li split_index: Int value. \n
 *
 * @par Restrictions:
 * Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
 */
REG_OP(ProdForceSeA)
    .INPUT(net_deriv, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(in_deriv, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(nlist, TensorType({DT_INT32}))
    .INPUT(natoms, TensorType({DT_INT32}))
    .OUTPUT(atom_force, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(n_a_sel, Int)
    .REQUIRED_ATTR(n_r_sel, Int)
    .ATTR(split_count, Int, 1)
    .ATTR(split_index, Int, 0)
    .OP_END_FACTORY_REG(ProdForceSeA)

/**
 * @brief Calculate ProdVirialSeA. \n
 *
 * @par Inputs:
 * Five inputs, including:
 * @li net_deriv: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li in_deriv: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li rij: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li nlist: A Tensor. dtype is int32.
 * @li natoms: A Tensor. dtype is int32. \n
 *
 * @par Outputs:
 * Two outputs, including:
 * @li virial: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li atom_virial: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Attributes:
 * Four attributes, including:
 * @li n_a_sel: Int value.
 * @li n_r_sel: Int value.
 * @li split_count: Int value.
 * @li split_index: Int value. \n
 */
REG_OP(ProdVirialSeA)
    .INPUT(net_deriv, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(in_deriv, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(rij, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(nlist, TensorType({DT_INT32}))
    .INPUT(natoms, TensorType({DT_INT32}))
    .OUTPUT(virial, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(atom_virial, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .REQUIRED_ATTR(n_a_sel, Int)
    .REQUIRED_ATTR(n_r_sel, Int)
    .ATTR(split_count, Int, 1)
    .ATTR(split_index, Int, 0)
    .OP_END_FACTORY_REG(ProdVirialSeA)

/**
 * @brief Calculate TabulateFusionGrad. \n
 *
 * @par Inputs:
 * Six inputs, including:
 * @li table: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li table_info: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li em_x: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li em: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li dy: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li descriptor: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Outputs:
 * @li dy_dem_x: A Tensor. Must be one of the following types: float16, float32, float64.
 * @li dy_dem: A Tensor. Must be one of the following types: float16, float32, float64. \n
 *
 * @par Attributes:
 * Two attributes, including:
 * @li split_count: Int value.
 * @li split_index: Int value. \n
 */
REG_OP(TabulateFusionGrad)
    .INPUT(table, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(table_info, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(em_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(em, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(dy, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .INPUT(descriptor, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(dy_dem_x, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .OUTPUT(dy_dem, TensorType({DT_FLOAT16, DT_FLOAT, DT_DOUBLE}))
    .ATTR(split_count, Int, 1)
    .ATTR(split_index, Int, 0)
    .OP_END_FACTORY_REG(TabulateFusionGrad)
}  // namespace ge
#endif  // OPS_BUILT_IN_OP_PROTO_INC_DEEP_MD_H_
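
For context on how such registrations are typically consumed, here is a minimal, hypothetical IR-construction sketch. It assumes the usual CANN op_proto convention that REG_OP(TabulateFusion) generates a ge::op::TabulateFusion class with set_input_*/set_attr_* setters, that ge::op::Data provides graph placeholders, and that an aggregate header such as all_ops.h exposes the generated classes; none of this is defined in deep_md.h itself, and all names below are illustrative.

// Hypothetical usage sketch (not part of deep_md.h): wiring TabulateFusion into a graph,
// assuming the standard generated ge::op::TabulateFusion class and ge::op::Data placeholders.
#include "graph/graph.h"
#include "all_ops.h"  // assumed aggregate header exposing the generated op classes

ge::Graph BuildTabulateFusionGraph() {
  // Graph inputs; real code would also set shapes/dtypes on their tensor descriptors.
  ge::op::Data table("table");
  ge::op::Data table_info("table_info");
  ge::op::Data em_x("em_x");
  ge::op::Data em("em");

  // Mirror the registration above: four inputs, one output,
  // one required attribute plus two optional ones.
  ge::op::TabulateFusion tabulate("tabulate_fusion");
  tabulate.set_input_table(table);
  tabulate.set_input_table_info(table_info);
  tabulate.set_input_em_x(em_x);
  tabulate.set_input_em(em);
  tabulate.set_attr_last_layer_size(128);  // REQUIRED_ATTR: must be set explicitly
  tabulate.set_attr_split_count(1);        // optional, defaults to 1
  tabulate.set_attr_split_index(0);        // optional, defaults to 0

  ge::Graph graph("tabulate_fusion_graph");
  graph.SetInputs({table, table_info, em_x, em}).SetOutputs({tabulate});
  return graph;
}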

The Graph Engine (GE) module is a submodule of MindSpore. Implemented in C++, it sits between the front-end module (ME) and the underlying hardware and acts as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.
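
Because the paragraph above is purely descriptive, here is a minimal, hedged sketch of the call path a client typically follows through GE API (initialize, add a graph, run it), assuming the standard ge_api.h interface with GEInitialize, Session::AddGraph and Session::RunGraph; the option map contents and graph id are illustrative only.

// Sketch of driving GE explicitly through GE API; in MindSpore this path is taken
// automatically during training/inference and the user never calls it directly.
#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"

int RunGraphOnAscend(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs) {
  std::map<std::string, std::string> options;  // e.g. SoC version, precision mode (illustrative)
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }

  int ret = 0;
  {
    ge::Session session(options);  // GE API entry point: one session per client
    const uint32_t kGraphId = 0;   // caller-chosen graph id
    std::vector<ge::Tensor> outputs;

    // GE Core takes over from here: the graph is optimized, compiled for the
    // Ascend AI processor, and executed; the caller only sees the output tensors.
    if (session.AddGraph(kGraphId, graph) != ge::SUCCESS ||
        session.RunGraph(kGraphId, inputs, outputs) != ge::SUCCESS) {
      ret = -1;
    }
  }  // the session is destroyed before GEFinalize

  ge::GEFinalize();
  return ret;
}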