You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

npu_loss_scale_ops.h 3.3 kB

5 years ago
4 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122
  1. /**
  2. * Copyright 2019 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. /*!
  17. * \file npu_loss_scale_ops.h
  18. * \brief
  19. */
  20. #ifndef OPS_BUILT_IN_OP_PROTO_INC_NPU_LOSS_SCALE_OPS_H_
  21. #define OPS_BUILT_IN_OP_PROTO_INC_NPU_LOSS_SCALE_OPS_H_
  22. #include "graph/operator_reg.h"
  23. namespace ge {
/**
*@brief NPU alloc-float-status operator: allocates the float-status buffer
* used for overflow detection in loss scaling . \n

*@par Outputs:
*data: A Tensor of data value. Must be float32.

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(NPUAllocFloatStatusOperator)
    .OUTPUT(data, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(NPUAllocFloatStatusOperator)
  34. /**
  35. *@brief Computes NPU clear float status operator function . \n
  36. *@par Inputs:
  37. *addr: A Tensor of data memory address. Must be float32 . \n
  38. *@par Outputs:
  39. *data: A Tensor of data value. Must be float32.
  40. *@par Restrictions:
  41. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  42. */
  43. REG_OP(NPUClearFloatStatusOperator)
  44. .INPUT(addr, TensorType{DT_FLOAT})
  45. .OUTPUT(data, TensorType({DT_FLOAT}))
  46. .OP_END_FACTORY_REG(NPUClearFloatStatusOperator)
  47. /**
  48. *@brief Computes NPU get float status operator function . \n
  49. *@par Inputs:
  50. *addr: A Tensor of data memory address. Must be float32 . \n
  51. *@par Outputs:
  52. *data: A Tensor of data value. Must be float32.
  53. *@par Restrictions:
  54. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  55. */
  56. REG_OP(NPUGetFloatStatusOperator)
  57. .INPUT(addr, TensorType{DT_FLOAT})
  58. .OUTPUT(data, TensorType({DT_FLOAT}))
  59. .OP_END_FACTORY_REG(NPUGetFloatStatusOperator)
/**
*@brief Produces a variable initialized to 0 in memory . \n

*@par Outputs:
*data: A Tensor of type float32.
* NOTE(review): an earlier comment documented an output "y" of type int32
* ("eight numbers with a value of zero"), but the registration below declares
* the output as "data" with DT_FLOAT — confirm intended shape/semantics.

*@par Restrictions:
*Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
*/
REG_OP(NPUAllocFloatStatus)
    .OUTPUT(data, TensorType({DT_FLOAT}))
    .OP_END_FACTORY_REG(NPUAllocFloatStatus)
  70. /**
  71. *@brief Set the value of address 0x40000 to 0 in each core . \n
  72. *@par Inputs:
  73. *addr: A tensor of type float32 . \n
  74. *@par Outputs:
  75. *data: A Tensor of type float32.
  76. *@par Restrictions:
  77. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  78. */
  79. REG_OP(NPUClearFloatStatus)
  80. .INPUT(addr, TensorType{DT_FLOAT})
  81. .OUTPUT(data, TensorType({DT_FLOAT}))
  82. .OP_END_FACTORY_REG(NPUClearFloatStatus)
  83. /**
  84. *@brief Get the value of address 0x40000 . \n
  85. *@par Inputs:
  86. *addr: A tensor of type float32 . \n
  87. *@par Outputs:
  88. *data: A Tensor of type float32.
  89. *@par Restrictions:
  90. *Warning: THIS FUNCTION IS EXPERIMENTAL. Please do not use.
  91. */
  92. REG_OP(NPUGetFloatStatus)
  93. .INPUT(addr, TensorType{DT_FLOAT})
  94. .OUTPUT(data, TensorType({DT_FLOAT}))
  95. .OP_END_FACTORY_REG(NPUGetFloatStatus)
  96. } // namespace ge
  97. #endif // OPS_BUILT_IN_OP_PROTO_INC_NPU_LOSS_SCALE_OPS_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示