You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

ge_executor.h 15 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
4 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
3 years ago
3 years ago
3 years ago
4 years ago
3 years ago
4 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
5 years ago
3 years ago
5 years ago
3 years ago
5 years ago
5 years ago
3 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
  1. /**
  2. * Copyright (c) Huawei Technologies Co., Ltd. 2021. All rights reserved.
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #ifndef INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_
  17. #define INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_
#include <atomic>
#include <cstdint>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "common/dynamic_aipp.h"
#include "framework/common/ge_inner_error_codes.h"
#include "framework/common/ge_types.h"
#include "framework/common/types.h"
#include "graph/ge_tensor.h"
#include "graph/tensor.h"
  27. namespace ge {
  28. class SingleOp;
  29. class DynamicSingleOp;
  30. class GeRootModel;
  31. struct RunModelData {
  32. uint32_t index; // Data index
  33. uint32_t modelId;
  34. std::vector<DataBuffer> blobs; // All input/output data buffer
  35. uint32_t timestamp; // Data creation time
  36. uint32_t timeout; // Processing timeout
  37. uint64_t request_id = 0UL; // Request ID
  38. uint64_t dynamic_batch_size = 0UL; // Dynamic batch size scene, set dynamic size, not supported by default:0
  39. uint64_t dynamic_image_height = 0UL; // Dynamic image size scene, set image height, not supported by default:0
  40. uint64_t dynamic_image_width = 0UL; // Dynamic image size scene, set image width, not supported by default:0
  41. std::vector<uint64_t> dynamic_dims; // Dynamic dims scene, set dynamic dims, not supported by default:empty
  42. };
///
/// @ingroup ge
/// @brief Execution-side facade of the Graph Engine: loads offline models
/// (from file, memory, or a GeRootModel), queries their descriptions, feeds
/// dynamic-shape/AIPP configuration, and runs them synchronously or
/// asynchronously. Also hosts the static single-operator execution API.
///
class GE_FUNC_VISIBILITY GeExecutor {
 public:
  GeExecutor();
  ~GeExecutor() = default;

  // Per-instance initialization / finalization (distinct from the static
  // process-wide Initialize(options)/FinalizeEx() pair below).
  Status Initialize();
  Status Finalize();

  ///
  /// @ingroup ge
  /// @brief Initialize global execute environment.
  /// @param [in] options: environment variables.
  /// @return init result
  ///
  static Status Initialize(const std::map<std::string, std::string> &options);

  ///
  /// @ingroup ge
  /// @brief Finalize global execute environment.
  /// @return execute result
  ///
  static Status FinalizeEx();

  /// @brief Unload the model identified by model_id.
  Status UnloadModel(const uint32_t model_id);

  ///
  /// @ingroup ge
  /// @brief Get input and output tensor descriptors of a loaded model.
  /// @param [in] model_id: model id allocate from manager
  /// @param [out] input_desc: descriptors of the model inputs
  /// @param [out] output_desc: descriptors of the model outputs
  /// @param [in] new_model_desc: presumably forces a refresh of the cached
  ///             model description — confirm against the implementation
  /// @return execute result
  ///
  Status GetModelDescInfo(const uint32_t model_id, std::vector<TensorDesc> &input_desc,
                          std::vector<TensorDesc> &output_desc, const bool new_model_desc = false);

  ///
  /// @ingroup ge
  /// @brief Set dynamic batch size
  /// @param [in] model_id: model id allocate from manager
  /// @param [in] dynamic_input_addr: dynamic input addr created by user
  /// @param [in] length: length of dynamic input addr
  /// @param [in] batch_size: batch size entered by user in dynamic multi-batch scenario
  /// @return execute result
  ///
  Status SetDynamicBatchSize(const uint32_t model_id, void *const dynamic_input_addr, const uint64_t length,
                             const uint64_t batch_size);

  ///
  /// @ingroup ge
  /// @brief Set dynamic image info
  /// @param [in] model_id: model id allocate from manager
  /// @param [in] dynamic_input_addr: dynamic input addr created by user
  /// @param [in] length: length of dynamic input addr
  /// @param [in] image_height: image height entered by user in dynamic multi-resolution scenario
  /// @param [in] image_width: image width entered by user in dynamic multi-resolution scenario
  /// @return execute result
  ///
  Status SetDynamicImageSize(const uint32_t model_id, void *const dynamic_input_addr, const uint64_t length,
                             const uint64_t image_height, const uint64_t image_width);

  ///
  /// @ingroup ge
  /// @brief Set dynamic dims info
  /// @param [in] model_id: model id allocate from manager
  /// @param [in] dynamic_input_addr: dynamic input addr created by user
  /// @param [in] length: length of dynamic input addr
  /// @param [in] dynamic_dims: array of dynamic dimensions
  /// @return execute result
  ///
  Status SetDynamicDims(const uint32_t model_id, void *const dynamic_input_addr, const uint64_t length,
                        const std::vector<uint64_t> &dynamic_dims);

  ///
  /// @ingroup ge
  /// @brief Get current dynamic dims info by combined dims
  /// @param [in] model_id: model id allocate from manager
  /// @param [in] dynamic_dims: cur gear dynamic dims value
  /// @param [out] cur_dynamic_dims: current dynamic dims
  /// @return execute result
  ///
  Status GetCurDynamicDims(const uint32_t model_id, const std::vector<uint64_t> &dynamic_dims,
                           std::vector<uint64_t> &cur_dynamic_dims);

  ///
  /// @ingroup ge
  /// @brief Get dynamic batch_info
  /// @param [in] model_id
  /// @param [out] batch_info: one inner vector per supported batch gear
  /// @param [out] dynamic_type
  /// @return execute result
  ///
  Status GetDynamicBatchInfo(const uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info,
                             int32_t &dynamic_type);

  ///
  /// @ingroup ge
  /// @brief Get combined dynamic dims info
  /// @param [in] model_id
  /// @param [out] batch_info
  /// @return execute result
  ///
  Status GetCombinedDynamicDims(const uint32_t model_id, std::vector<std::vector<int64_t>> &batch_info);

  ///
  /// @ingroup ge
  /// @brief Get user designate shape order
  /// @param [in] model_id
  /// @param [out] user_designate_shape_order
  /// @return execute result
  ///
  Status GetUserDesignateShapeOrder(const uint32_t model_id, std::vector<std::string> &user_designate_shape_order);

  /// @brief Get the current shape gear of the model as a flat dim list.
  Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);

  ///
  /// @ingroup ge
  /// @brief Set dynamic AIPP info
  /// @param [in] model_id: model id allocate from manager
  /// @param [in] dynamic_input_addr: dynamic input addr created by user
  /// @param [in] length: length of dynamic input addr
  /// @param [in] aipp_batch_para: kAippDynamicBatchPara vector by user in dynamic aipp
  /// @param [in] aipp_parms: kAippDynamicPara by user in dynamic aipp
  /// @return execute result
  ///
  Status SetDynamicAippData(const uint32_t model_id, void *const dynamic_input_addr, const uint64_t length,
                            const std::vector<kAippDynamicBatchPara> &aipp_batch_para,
                            const kAippDynamicPara &aipp_parms);

  /// @brief Query the AIPP configuration of input `index`.
  Status GetAIPPInfo(const uint32_t model_id, const uint32_t index, AippConfigInfo &aipp_info);

  /// @brief Read a string attribute `attr_name` of op `op_name` in the model.
  Status GetOpAttr(const uint32_t model_id, const std::string &op_name, const std::string &attr_name,
                   std::string &attr_value);

  /// @brief Query dynamic output shape information of the model.
  Status GetModelAttr(const uint32_t model_id, std::vector<std::string> &dynamic_output_shape_info);

  /// @brief Query the AIPP type bound to input `index` and its aipp index.
  Status GetAippType(const uint32_t model_id, const uint32_t index, InputAippType &type, size_t &aipp_index);

  /// @brief Dispatch a generic command to the executor.
  Status CommandHandle(const Command &command) const;

  /// @brief Apply a dump (debug data collection) configuration.
  Status SetDump(const DumpConfig &dump_config);

  ///
  /// @ingroup ge
  /// @brief Query model memory consuming interface
  /// @param [in] model_id Offline model ID
  /// @param [out] max_size Memory size
  /// @return SUCCESS
  /// @return FAILED
  ///
  Status GetMaxUsedMemory(const uint32_t model_id, uint32_t &max_size);

  ///
  /// @ingroup ge
  /// @brief Load data from model file to memory
  /// @param [in] const std::string &path: Offline model file path
  /// @param [out] ModelData &model_data: Offline model memory data
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status LoadDataFromFile(const std::string &path, ModelData &model_data);

  ///
  /// @ingroup ge
  /// @brief Load model from offline model memory data
  /// @param [in] ModelData &model_data: Offline model data
  /// @param [in] void *dev_ptr: Input/Output memory address
  /// @param [in] size_t mem_size: Input/Output memory length
  /// @param [in] void *weight_ptr: Weight memory address
  /// @param [in] size_t weight_size: Weight memory length
  /// @param [out] uint32_t &model_id: Corresponding identification after model loading
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status LoadModelFromData(uint32_t &model_id, const ModelData &model_data, void *const dev_ptr, const size_t mem_size,
                           void *const weight_ptr, const size_t weight_size);

  ///
  /// @ingroup ge
  /// @brief Load task list from ModelData with queue.
  /// @param [out] model_id: model id allocate from manager.
  /// @param [in] model_data: Model data load from offline model.
  /// @param [in] input_queue_ids: input queue ids create from user.
  /// @param [in] output_queue_ids: output queue ids create from user.
  /// @return: 0 for success / others for fail
  ///
  Status LoadModelWithQ(uint32_t &model_id, const ModelData &model_data, const std::vector<uint32_t> &input_queue_ids,
                        const std::vector<uint32_t> &output_queue_ids);

  ///
  /// @ingroup ge
  /// @brief Load task list from GeRootModel with queue.
  /// @param [out] model_id: model id allocate from manager.
  /// @param [in] root_model: Instance of GeRootModel.
  /// @param [in] input_queue_ids: input queue ids create from user.
  /// @param [in] output_queue_ids: output queue ids create from user.
  /// @return: 0 for success / others for fail
  ///
  Status LoadModelWithQ(uint32_t &model_id, const std::shared_ptr<GeRootModel> &root_model,
                        const std::vector<uint32_t> &input_queue_ids, const std::vector<uint32_t> &output_queue_ids);

  ///
  /// @ingroup ge
  /// @brief Synchronous execution of offline model(Do not create thread)
  /// @param [in] model_id: Model ID to execute
  /// @param [in] stream: stream to execute on
  /// @param [in] input_data: Model input data
  /// @param [out] output_data: Model output data
  /// @param [in] async_mode: true for asynchronous mode
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status ExecModel(const uint32_t model_id, void *const stream, const RunModelData &input_data,
                   RunModelData &output_data, const bool async_mode = false);

  ///
  /// @ingroup ge
  /// @brief Load task list from root_model without input queue or output queue.
  /// @param [out] model_id: model id allocate from manager.
  /// @param [in] root_model: Instance of GeRootModel.
  /// @return: 0 for success / others for fail
  ///
  Status LoadModelWithoutQ(uint32_t &model_id, const std::shared_ptr<GeRootModel> &root_model) const;

  ///
  /// @ingroup ge
  /// @brief Synchronous execution of offline model(Do not create thread)
  /// @param [in] model_id: Model ID to execute
  /// @param [in] stream: stream to execute on
  /// @param [in] run_input_data: Model input data
  /// @param [in] input_desc: description of model input data
  /// @param [out] run_output_data: Model output data
  /// @param [out] output_desc: description of model output data
  /// @param [in] async_mode: true for asynchronous mode
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status ExecModel(const uint32_t model_id, void *const stream, const RunModelData &run_input_data,
                   const std::vector<GeTensorDesc> &input_desc, RunModelData &run_output_data,
                   std::vector<GeTensorDesc> &output_desc, const bool async_mode = false);

  ///
  /// @ingroup ge
  /// @brief Get execution and weight memory size from model file
  /// @param [in] const std::string &path: Offline model file path
  /// @param [out] size_t &mem_size Execution memory size
  /// @param [out] size_t &weight_size Weight memory space size
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status GetMemAndWeightSize(const std::string &path, size_t &mem_size, size_t &weight_size);

  ///
  /// @ingroup ge
  /// @brief Get execution and weight memory size from model buffer
  /// @param [in] const void *model_data Offline model buffer
  /// @param [in] size_t model_size Offline model buffer length
  /// @param [out] size_t &mem_size Execution memory size
  /// @param [out] size_t &weight_size Weight memory space size
  /// @return SUCCESS handle successfully / others handle failed
  ///
  Status GetMemAndWeightSize(const void *const model_data, const size_t model_size, size_t &mem_size,
                             size_t &weight_size);

  // ---- Static single-operator API: load, execute, and unload standalone ops
  // ---- (static-shape SingleOp and dynamic-shape DynamicSingleOp variants).
  // The returned op pointers are owned by the executor framework, not the
  // caller — callers release them via the matching Unload*/Release* calls.
  static Status LoadSingleOp(const std::string &model_name, const ModelData &model_data, void *const stream,
                             SingleOp **const single_op);

  // V2 additionally binds the op to an explicit model_id.
  static Status LoadSingleOpV2(const std::string &model_name, const ModelData &model_data, void *const stream,
                               SingleOp **const single_op, const uint64_t model_id);

  static Status ExecuteAsync(SingleOp *const executor, const std::vector<DataBuffer> &inputs,
                             std::vector<DataBuffer> &outputs);

  static Status LoadDynamicSingleOp(const std::string &model_name, const ModelData &model_data, void *const stream,
                                    DynamicSingleOp **const single_op);

  static Status LoadDynamicSingleOpV2(const std::string &model_name, const ModelData &model_data, void *const stream,
                                      DynamicSingleOp **const single_op, const uint64_t model_id);

  static Status UnloadSingleOp(const uint64_t op_id);

  static Status UnloadDynamicSingleOp(const uint64_t op_id);

  // Dynamic-shape execution also carries input/output tensor descriptions.
  static Status ExecuteAsync(DynamicSingleOp *const executor, const std::vector<GeTensorDesc> &input_desc,
                             const std::vector<DataBuffer> &inputs, std::vector<GeTensorDesc> &output_desc,
                             std::vector<DataBuffer> &outputs);

  /// @brief Release all single-op resources associated with `stream`.
  static Status ReleaseSingleOpResource(void *const stream);

  /// @brief Look up the device a loaded model resides on.
  static Status GetDeviceIdByModelId(const uint32_t model_id, uint32_t &device_id);

  /// @brief Number of supported shape gears of the model.
  Status GetBatchInfoSize(const uint32_t model_id, size_t &shape_count);

  /// @brief Original (pre-AIPP) information of input `index`.
  Status GetOrigInputInfo(const uint32_t model_id, const uint32_t index, OriginInputInfo &orig_input_info);

  /// @brief All AIPP input/output dims of input `index`.
  Status GetAllAippInputOutputDims(const uint32_t model_id, const uint32_t index,
                                   std::vector<InputOutputDims> &input_dims, std::vector<InputOutputDims> &output_dims);

  /// @brief Locate the op description for a (device, stream, task) triple,
  /// e.g. when mapping a runtime fault back to an operator.
  Status GetOpDescInfo(const uint32_t device_id, const uint32_t stream_id, const uint32_t task_id,
                       OpDescInfo &op_desc_info);

 private:
  // Guards one-time global initialization across instances; atomic because
  // Initialize/FinalizeEx are static and may race — confirm usage in the .cc.
  static std::atomic_bool is_inited_;
};
  291. } // namespace ge
  292. #endif // INC_FRAMEWORK_EXECUTOR_GE_EXECUTOR_H_

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。