
ge_executor.h 9.7 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_
#define INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_

#include <memory>
#include <string>
#include <vector>

#include "common/dynamic_aipp.h"
#include "common/ge_inner_error_codes.h"
#include "common/ge_types.h"
#include "common/types.h"
#include "graph/tensor.h"
#include "runtime/base.h"

namespace ge {
class ModelListenerAdapter;
class SingleOp;

struct RunModelData {
  uint32_t index;  // Data index
  uint32_t modelId;
  std::vector<DataBuffer> blobs;  // All input/output data buffers
  uint32_t timestamp;             // Data creation time
  uint32_t timeout;               // Processing timeout
  uint64_t request_id = 0;        // Request ID
  uint64_t dynamic_batch_size = 0;    // Dynamic batch scenario: batch size set by the user; default 0 means not used
  uint64_t dynamic_image_height = 0;  // Dynamic image-size scenario: image height set by the user; default 0 means not used
  uint64_t dynamic_image_width = 0;   // Dynamic image-size scenario: image width set by the user; default 0 means not used
};

class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {
 public:
  GeExecutor();
  ~GeExecutor() = default;

  ge::Status Initialize();
  ge::Status Finalize();

  // Load model
  ge::Status LoadModelOffline(uint32_t &model_id, const std::string &path, const std::string &key, int32_t priority,
                              std::shared_ptr<ge::ModelListener> listener);

  ge::Status UnloadModel(uint32_t modelId);

  ge::Status RunModel(const ge::RunModelData &input_data, ge::RunModelData &output_data);

  // Get input and output descriptors
  ge::Status GetModelDescInfo(uint32_t model_id, std::vector<ge::TensorDesc> &input_desc,
                              std::vector<ge::TensorDesc> &output_desc, bool new_model_desc = false);

  ///
  /// @ingroup ge
  /// @brief Set dynamic batch size
  /// @param [in] model_id: model id allocated by the manager
  /// @param [in] dynamic_input_addr: dynamic input address created by the user
  /// @param [in] length: length of the dynamic input address
  /// @param [in] batch_size: batch size entered by the user in the dynamic multi-batch scenario
  /// @return execute result
  ///
  ge::Status SetDynamicBatchSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t batch_size);

  ///
  /// @ingroup ge
  /// @brief Set dynamic image info
  /// @param [in] model_id: model id allocated by the manager
  /// @param [in] dynamic_input_addr: dynamic input address created by the user
  /// @param [in] length: length of the dynamic input address
  /// @param [in] image_height: image height entered by the user in the dynamic multi-resolution scenario
  /// @param [in] image_width: image width entered by the user in the dynamic multi-resolution scenario
  /// @return execute result
  ///
  ge::Status SetDynamicImageSize(uint32_t model_id, void *dynamic_input_addr, uint64_t length, uint64_t image_height,
                                 uint64_t image_width);

  ///
  /// @ingroup ge
  /// @brief Get dynamic batch info
  /// @param [in] model_id
  /// @param [out] batch_info
  /// @return execute result
  ///
  ge::Status GetDynamicBatchInfo(uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);

  ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info);

  ///
  /// @ingroup ge
  /// @brief Set dynamic AIPP info
  /// @param [in] model_id: model id allocated by the manager
  /// @param [in] dynamic_input_addr: dynamic input address created by the user
  /// @param [in] length: length of the dynamic input address
  /// @param [in] aippBatchPara: kAippDynamicBatchPara vector provided by the user in the dynamic AIPP scenario
  /// @param [in] aippParms: kAippDynamicPara provided by the user in the dynamic AIPP scenario
  /// @return execute result
  ///
  ge::Status SetDynamicAippData(uint32_t model_id, void *dynamic_input_addr, uint64_t length,
                                const std::vector<kAippDynamicBatchPara> &aippBatchPara,
                                const kAippDynamicPara &aippParms);

  ge::Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info);
  ge::Status GetModelAttr(uint32_t model_id, std::vector<std::string> &dynamic_output_shape_info);

  ge::Status GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector<ge::TensorDesc> &input_desc,
                                         std::vector<ge::TensorDesc> &output_desc);

  ge::Status LoadModel(uint32_t &model_id, const ge::ModelData &model_data,
                       std::shared_ptr<ge::ModelListener> listener);

  ge::Status CommandHandle(const ge::Command &command);

  ///
  /// @ingroup ge
  /// @brief Query model memory consumption
  /// @param [in] model_id Offline model ID
  /// @param [out] max_size Memory size
  /// @return SUCCESS
  /// @return FAILED
  ///
  ge::Status GetMaxUsedMemory(uint32_t model_id, uint32_t &max_size);

  ///
  /// @ingroup ge
  /// @brief Load data from a model file into memory
  /// @param [in] const std::string &path: Offline model file path
  /// @param [out] ModelData &model_data: Offline model memory data
  /// @return SUCCESS handled successfully / others handling failed
  ///
  ge::Status LoadDataFromFile(const std::string &path, ge::ModelData &model_data);

  ///
  /// @ingroup ge
  /// @brief Load model from offline model memory data
  /// @param [in] ModelData &model_data: Offline model data
  /// @param [in] void *dev_ptr: Input/Output memory address
  /// @param [in] size_t mem_size: Input/Output memory length
  /// @param [in] void *weight_ptr: Weight memory address
  /// @param [in] size_t weight_size: Weight memory length
  /// @param [out] uint32_t &model_id: Identifier assigned after the model is loaded
  /// @return SUCCESS handled successfully / others handling failed
  ///
  ge::Status LoadModelFromData(uint32_t &model_id, const ge::ModelData &model_data, void *dev_ptr, size_t mem_size,
                               void *weight_ptr, size_t weight_size);

  ///
  /// @ingroup ge
  /// @brief Load task list from ModelData with queues.
  /// @param [out] model_id: model id allocated by the manager.
  /// @param [in] model_data: Model data loaded from the offline model.
  /// @param [in] input_queue_ids: input queue ids created by the user.
  /// @param [in] output_queue_ids: output queue ids created by the user.
  /// @return: 0 for success / others for failure
  ///
  ge::Status LoadModelWithQ(uint32_t &model_id, const ge::ModelData &model_data,
                            const std::vector<uint32_t> &input_queue_ids,
                            const std::vector<uint32_t> &output_queue_ids);

  ///
  /// @ingroup ge
  /// @brief Synchronous execution of an offline model (does not create a thread)
  /// @param [in] uint32_t model_id: Model ID to execute
  /// @param [in] void *stream: stream to execute on
  /// @param [in] bool async_mode: whether to run in asynchronous mode
  /// @param [in] const domi::InputData *input_data: Model input data
  /// @param [out] domi::OutputData *output_data: Model output data
  /// @return SUCCESS handled successfully / others handling failed
  ///
  ge::Status ExecModel(uint32_t model_id, void *stream, const ge::RunModelData &input_data,
                       ge::RunModelData &output_data, bool async_mode = false);

  ///
  /// @ingroup ge
  /// @brief Get execution and weight memory sizes from a model file
  /// @param [in] const std::string &path: Offline model file path
  /// @param [out] size_t &mem_size Execution memory size
  /// @param [out] size_t &weight_size Weight memory size
  /// @return SUCCESS handled successfully / others handling failed
  ///
  ge::Status GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size);

  ///
  /// @ingroup ge
  /// @brief Get execution and weight memory sizes from a model buffer
  /// @param [in] const void *model_data Offline model buffer
  /// @param [in] size_t model_size Offline model buffer length
  /// @param [out] size_t &mem_size Execution memory size
  /// @param [out] size_t &weight_size Weight memory size
  /// @return SUCCESS handled successfully / others handling failed
  ///
  ge::Status GetMemAndWeightSize(const void *model_data, size_t model_size, size_t &mem_size, size_t &weight_size);

  static ge::Status LoadSingleOp(const std::string &modelName, const ge::ModelData &modelData, void *stream,
                                 SingleOp **single_op);

  static ge::Status ExecuteAsync(SingleOp *executor, const std::vector<DataBuffer> &inputs,
                                 std::vector<DataBuffer> &outputs);

  static ge::Status ReleaseSingleOpResource(void *stream);

  ge::Status GetBatchInfoSize(uint32_t model_id, size_t &shape_count);
  ge::Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info);
  ge::Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector<InputOutputDims> &input_dims,
                                       std::vector<InputOutputDims> &output_dims);

 private:
  static bool isInit_;
};

ge::Status ModelInfoParser(const ge::ModelData &model, ge::ModelInfo &model_info);
}  // namespace ge

#endif  // INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_
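For reference, below is a minimal usage sketch of the offline-model path declared above (Initialize, LoadModelOffline, RunModel, UnloadModel, Finalize). It is illustrative only, not part of the repository: the include path, the empty decryption key, the null listener, the use of RunModelData::modelId to select the model, and the DataBuffer field names (data, length) are assumptions; production code should check every returned ge::Status.

// Minimal, illustrative sketch of the offline-model execution flow.
#include <memory>
#include <string>
#include <vector>
#include "framework/executor/ge_executor.h"  // include path assumed from the header guard

int RunOfflineModel(const std::string &om_path, void *input_buf, uint64_t input_len,
                    void *output_buf, uint64_t output_len) {
  ge::GeExecutor executor;
  if (executor.Initialize() != ge::SUCCESS) {
    return -1;
  }

  // Load the .om file. Assumptions: an empty key means an unencrypted model,
  // priority 0 is a neutral default, and a null listener requests synchronous behaviour.
  uint32_t model_id = 0;
  if (executor.LoadModelOffline(model_id, om_path, "", 0, nullptr) != ge::SUCCESS) {
    (void)executor.Finalize();
    return -1;
  }

  // Wrap caller-provided buffers as input/output blobs. RunModel is assumed to
  // pick the target model from RunModelData::modelId (see the struct above).
  ge::RunModelData input_data{};
  input_data.modelId = model_id;
  ge::DataBuffer in_blob{};
  in_blob.data = input_buf;    // field names assumed from common GE usage
  in_blob.length = input_len;
  input_data.blobs.push_back(in_blob);

  ge::RunModelData output_data{};
  output_data.modelId = model_id;
  ge::DataBuffer out_blob{};
  out_blob.data = output_buf;
  out_blob.length = output_len;
  output_data.blobs.push_back(out_blob);

  // Synchronous inference, then cleanup.
  ge::Status ret = executor.RunModel(input_data, output_data);
  (void)executor.UnloadModel(model_id);
  (void)executor.Finalize();
  return (ret == ge::SUCCESS) ? 0 : -1;
}

When the caller wants to manage execution and weight memory itself, the declarations above suggest the same flow can instead go through LoadDataFromFile, GetMemAndWeightSize, and LoadModelFromData.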

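The static LoadSingleOp / ExecuteAsync / ReleaseSingleOpResource entry points provide a lighter-weight path for running a single operator without loading a full network model. A hedged sketch follows, assuming the caller already holds a runtime stream and an operator model in a ge::ModelData; the DataBuffer handling follows the same assumptions as the previous example, and the resource-ownership comments are inferences, not documented guarantees.

// Illustrative single-operator sketch; stream creation and synchronization
// belong to the Ascend runtime API and are omitted here.
#include <string>
#include <vector>
#include "framework/executor/ge_executor.h"  // include path assumed from the header guard

ge::Status RunSingleOp(const std::string &op_model_name, const ge::ModelData &op_model_data, void *stream,
                       std::vector<ge::DataBuffer> &inputs, std::vector<ge::DataBuffer> &outputs) {
  // Build the single-op executor from the operator model; the returned SingleOp
  // handle is presumably owned by GE and bound to the given stream (assumption).
  ge::SingleOp *single_op = nullptr;
  ge::Status ret = ge::GeExecutor::LoadSingleOp(op_model_name, op_model_data, stream, &single_op);
  if (ret != ge::SUCCESS) {
    return ret;
  }

  // Enqueue the operator on the stream; the caller is expected to synchronize
  // the stream before reading the outputs.
  ret = ge::GeExecutor::ExecuteAsync(single_op, inputs, outputs);

  // Release all single-op resources associated with the stream when done.
  (void)ge::GeExecutor::ReleaseSingleOpResource(stream);
  return ret;
}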
The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically without the user being aware of it. GE consists mainly of two parts, GE API and GE Core; the detailed architecture is shown in the diagram below.