
model_utils.h

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef GE_GRAPH_LOAD_NEW_MODEL_MANAGER_MODEL_UTILS_H_
#define GE_GRAPH_LOAD_NEW_MODEL_MANAGER_MODEL_UTILS_H_

#include <vector>

#include "common/ge_inner_error_codes.h"
#include "common/types.h"
#include "graph/load/new_model_manager/task_info/task_info.h"
#include "graph/op_desc.h"
#include "graph/utils/tensor_adapter.h"

using std::vector;

namespace ge {
class ModelUtils {
 public:
  ModelUtils() = default;
  ~ModelUtils() = default;

  ///
  /// @ingroup ge
  /// @brief Get input size.
  /// @return vector<int64_t>
  ///
  static vector<int64_t> GetInputSize(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get output size.
  /// @return vector<int64_t>
  ///
  static vector<int64_t> GetOutputSize(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get workspace size.
  /// @return vector<int64_t>
  ///
  static vector<int64_t> GetWorkspaceSize(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get weight size.
  /// @return vector<int64_t>
  ///
  static vector<int64_t> GetWeightSize(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get weights.
  /// @return vector<ConstGeTensorPtr>
  ///
  static vector<ConstGeTensorPtr> GetWeights(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get AiCpuOp input descriptors.
  /// @return vector<::tagCcAICPUTensor>
  ///
  static vector<::tagCcAICPUTensor> GetInputDescs(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get AiCpuOp output descriptors.
  /// @return vector<::tagCcAICPUTensor>
  ///
  static vector<::tagCcAICPUTensor> GetOutputDescs(ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get input data addresses.
  /// @return vector<void*>
  ///
  static vector<void *> GetInputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get output data addresses.
  /// @return vector<void*>
  ///
  static vector<void *> GetOutputDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get workspace data addresses.
  /// @return vector<void*>
  ///
  static vector<void *> GetWorkspaceDataAddrs(const RuntimeParam &model_param, ConstOpDescPtr op_desc);

  ///
  /// @ingroup ge
  /// @brief Get memory runtime base address.
  /// @return Status
  ///
  static Status GetRtAddress(const RuntimeParam &model_param, uintptr_t logic_addr, uint8_t *&mem_addr);
};
}  // namespace ge
#endif  // GE_GRAPH_LOAD_NEW_MODEL_MANAGER_MODEL_UTILS_H_
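The class exposes only static helpers, so callers never instantiate it. Below is a minimal usage sketch; the function PrepareKernelArgs and the way the results are consumed are illustrative assumptions, not part of the GE codebase, and a valid ConstOpDescPtr plus a populated RuntimeParam are assumed to come from the model loader.

// Hypothetical sketch: gather the per-op sizes and runtime addresses
// needed to build a task's argument buffer.
#include <vector>
#include "graph/load/new_model_manager/model_utils.h"

void PrepareKernelArgs(const ge::RuntimeParam &model_param, ge::ConstOpDescPtr op_desc) {
  // Logical tensor sizes recorded in the op description.
  std::vector<int64_t> input_sizes = ge::ModelUtils::GetInputSize(op_desc);
  std::vector<int64_t> output_sizes = ge::ModelUtils::GetOutputSize(op_desc);

  // Device addresses resolved against the model's runtime memory base.
  std::vector<void *> input_addrs = ge::ModelUtils::GetInputDataAddrs(model_param, op_desc);
  std::vector<void *> output_addrs = ge::ModelUtils::GetOutputDataAddrs(model_param, op_desc);
  std::vector<void *> workspace_addrs = ge::ModelUtils::GetWorkspaceDataAddrs(model_param, op_desc);

  // ... pack the sizes and addresses into the task's argument buffer ...
}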

The Graph Engine (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists of two main parts, GE API and GE Core; the detailed architecture diagram is shown below.