You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

single_op_model.cc 17 kB

5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
5 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "single_op/single_op_model.h"
  17. #include <atomic>
  18. #include <memory>
  19. #include <string>
  20. #include <vector>
  21. #include "framework/common/debug/ge_log.h"
  22. #include "graph/debug/ge_attr_define.h"
  23. #include "graph/load/new_model_manager/model_utils.h"
  24. #include "graph/utils/attr_utils.h"
  25. #include "graph/utils/graph_utils.h"
  26. #include "graph/utils/tensor_utils.h"
  27. #include "runtime/rt.h"
  28. #include "task/aicpu_task_builder.h"
  29. #include "task/aicpu_kernel_task_builder.h"
  30. #include "task/tbe_task_builder.h"
  31. static std::atomic<std::uint64_t> aicpu_sessionid(0);
  32. using domi::TaskDef;
  33. using std::unique_ptr;
  34. using std::vector;
  35. namespace ge {
  36. namespace {
  37. const size_t kDataOutputNum = 1;
  38. } // namespace
// Binds the model to its name and the serialized model buffer.
// NOTE(review): the buffer is referenced, not copied — the caller must keep
// model_data alive for the lifetime of this object (see InitModel, which
// const_casts and hands it to ModelHelper::LoadModel).
SingleOpModel::SingleOpModel(const std::string &model_name, const void *model_data, uint32_t model_size)
    : model_name_(model_name), ori_model_data_(model_data), ori_model_size_(model_size) {}
  41. Status SingleOpModel::Init() {
  42. GE_CHK_STATUS_RET_NOLOG(InitModel());
  43. return LoadAllNodes();
  44. }
  45. Status SingleOpModel::InitModel() {
  46. ge::ModelData model;
  47. model.model_len = ori_model_size_;
  48. model.model_data = const_cast<void *>(ori_model_data_);
  49. auto ret = model_helper_.LoadModel(model);
  50. if (ret != SUCCESS) {
  51. GELOGE(ret, "LoadModel failed");
  52. return ret;
  53. }
  54. return SUCCESS;
  55. }
  56. void SingleOpModel::ParseOpModelParams(ModelHelper &model_helper, SingleOpModelParam &param) {
  57. int64_t value = 0;
  58. bool ret = false;
  59. std::shared_ptr<ge::GeModel> model = model_helper.GetGeModel();
  60. GE_CHECK_NOTNULL_JUST_RETURN(model);
  61. ret = ge::AttrUtils::GetInt(model, ATTR_MODEL_MEMORY_SIZE, value);
  62. param.memory_size = ret ? static_cast<uint64_t>(value) : 0;
  63. ret = ge::AttrUtils::GetInt(model, ATTR_MODEL_ZERO_COPY_MEMORY_SIZE, value);
  64. param.zero_copy_mem_size = ret ? static_cast<uint64_t>(value) : 0;
  65. ret = ge::AttrUtils::GetInt(model, ATTR_MODEL_WEIGHT_SIZE, value);
  66. param.weight_size = ret ? static_cast<uint64_t>(value) : 0;
  67. ret = ge::AttrUtils::GetInt(model, MODEL_ATTR_TASK_GEN_BASE_ADDR, value);
  68. param.base_addr = ret ? static_cast<uint64_t>(value) : 0;
  69. ret = ge::AttrUtils::GetInt(model, MODEL_ATTR_TASK_GEN_WEIGHT_ADDR, value);
  70. param.weight_addr = ret ? static_cast<uint64_t>(value) : 0;
  71. ret = ge::AttrUtils::GetInt(model, ATTR_MODEL_CORE_TYPE, value);
  72. param.core_type = ret ? value : 0;
  73. GELOGI("ParseOpModelParams(), total_memory_size:%lu, zero_copy_size:%lu, weight_size:%lu. core_type = %lu",
  74. param.memory_size, param.zero_copy_mem_size, param.weight_size, param.core_type);
  75. }
  76. Status SingleOpModel::InitModelMem(StreamResource &res) {
  77. ParseOpModelParams(model_helper_, model_params_);
  78. if (model_params_.memory_size > model_params_.zero_copy_mem_size) {
  79. const string purpose("malloc feature map memory on model execute.");
  80. GELOGI("total memory: %lu, zero_copy_mem: %lu", model_params_.memory_size, model_params_.zero_copy_mem_size);
  81. model_params_.mem_base = res.MallocMemory(purpose, model_params_.memory_size - model_params_.zero_copy_mem_size);
  82. if (model_params_.mem_base == nullptr) {
  83. return RT_FAILED;
  84. }
  85. }
  86. if (model_params_.weight_size > 0 && has_weight_) {
  87. const string purpose("malloc weights memory on model execute.");
  88. model_params_.weight_base = res.MallocWeight(purpose, model_params_.weight_size);
  89. if (model_params_.weight_base == nullptr) {
  90. // no need to free memory, for that was handled by StreamResources
  91. return RT_FAILED;
  92. }
  93. auto weight_buffer = model_helper_.GetGeModel()->GetWeight();
  94. GELOGI("To copy weight to device. weight size = %zu", weight_buffer.GetSize());
  95. GE_CHK_RT_RET(rtMemcpy(model_params_.weight_base, model_params_.weight_size, weight_buffer.GetData(),
  96. weight_buffer.GetSize(), RT_MEMCPY_HOST_TO_DEVICE));
  97. }
  98. return SUCCESS;
  99. }
  100. Status SingleOpModel::ParseInputNode(const OpDescPtr &op_desc) {
  101. vector<int64_t> offsets = op_desc->GetOutputOffset();
  102. if (offsets.size() != kDataOutputNum) {
  103. GELOGE(PARAM_INVALID, "Data op should have only one output, but got %zu", op_desc->GetOutputOffset().size());
  104. return PARAM_INVALID;
  105. }
  106. auto output_desc = op_desc->GetOutputDescPtr(0);
  107. GE_CHECK_NOTNULL(output_desc);
  108. int64_t tensor_size = 0;
  109. (void)TensorUtils::GetSize(*output_desc, tensor_size);
  110. input_offset_list_.emplace_back(offsets[0]);
  111. input_sizes_.emplace_back(tensor_size);
  112. GELOGI("[%s] parse input node: %s, size = %ld, offset = %u", model_name_.c_str(), op_desc->GetName().c_str(),
  113. tensor_size, static_cast<uint32_t>(offsets[0]));
  114. return SUCCESS;
  115. }
  116. void SingleOpModel::ParseOutputNode(const OpDescPtr &op_desc) {
  117. vector<int64_t> offsets = op_desc->GetInputOffset();
  118. for (uint32_t k = 0; k < static_cast<uint32_t>(offsets.size()); ++k) {
  119. auto input_desc = op_desc->GetInputDescPtr(k);
  120. if (input_desc == nullptr) {
  121. continue;
  122. }
  123. int64_t tensor_size = 0;
  124. (void)TensorUtils::GetSize(*input_desc, tensor_size);
  125. output_offset_list_.emplace_back(offsets[k]);
  126. output_sizes_.emplace_back(tensor_size);
  127. GELOGI("[%s] parse output node: %s, size = %ld, offset = %u", model_name_.c_str(), op_desc->GetName().c_str(),
  128. tensor_size, static_cast<uint32_t>(offsets[k]));
  129. }
  130. }
  131. Status SingleOpModel::LoadAllNodes() {
  132. auto ge_model = model_helper_.GetGeModel();
  133. GE_CHECK_NOTNULL(ge_model);
  134. Graph graph = ge_model->GetGraph();
  135. auto compute_graph = GraphUtils::GetComputeGraph(graph);
  136. if (compute_graph == nullptr) {
  137. GELOGE(PARAM_INVALID, "[%s] compute_graph is null", model_name_.c_str());
  138. return PARAM_INVALID;
  139. }
  140. auto nodes = compute_graph->GetDirectNode();
  141. size_t model_op_size = nodes.size();
  142. GELOGI("[%s] node size = %zu", model_name_.c_str(), model_op_size);
  143. for (size_t i = 0; i < model_op_size; ++i) {
  144. auto node = nodes.at(i);
  145. auto op_desc = node->GetOpDesc();
  146. GE_CHECK_NOTNULL(op_desc);
  147. op_list_[i] = node;
  148. auto op_type = op_desc->GetType();
  149. GELOGI("[%s] node[%zu] = %s, type = %s", model_name_.c_str(), i, node->GetName().c_str(), op_type.c_str());
  150. if (op_type == DATA_TYPE || op_type == AIPP_DATA_TYPE) {
  151. data_ops_.emplace_back(op_desc);
  152. continue;
  153. }
  154. if (op_type == CONSTANT || op_type == CONSTANTOP) {
  155. has_weight_ = true;
  156. continue;
  157. }
  158. if (op_type == NETOUTPUT) {
  159. netoutput_op_ = op_desc;
  160. continue;
  161. }
  162. ge_model->GetTBEKernelStore().LoadTBEKernelBinToOpDesc(op_desc);
  163. }
  164. return SUCCESS;
  165. }
  166. Status SingleOpModel::ParseInputsAndOutputs() {
  167. for (auto &op_desc : data_ops_) {
  168. GE_CHK_STATUS_RET_NOLOG(ParseInputNode(op_desc));
  169. }
  170. ParseOutputNode(netoutput_op_);
  171. return SUCCESS;
  172. }
  173. Status SingleOpModel::SetInputsAndOutputs(SingleOp &single_op) {
  174. int arg_index = 0;
  175. for (size_t i = 0; i < input_offset_list_.size(); ++i) {
  176. auto *addr = model_params_.mem_base + input_offset_list_[i];
  177. model_params_.addr_mapping_.emplace(reinterpret_cast<uintptr_t>(addr), arg_index++);
  178. single_op.input_sizes_.emplace_back(input_sizes_[i]);
  179. single_op.input_addr_list_.emplace_back(addr);
  180. }
  181. for (size_t i = 0; i < output_offset_list_.size(); ++i) {
  182. auto *addr = model_params_.mem_base + output_offset_list_[i];
  183. model_params_.addr_mapping_.emplace(reinterpret_cast<uintptr_t>(addr), arg_index++);
  184. single_op.output_sizes_.emplace_back(output_sizes_[i]);
  185. single_op.output_addr_list_.emplace_back(addr);
  186. }
  187. single_op.args_.resize(arg_index);
  188. return SUCCESS;
  189. }
// Translates the model's task definitions into executable OpTask objects and
// appends them to |single_op|. Supported task types:
//   - RT_MODEL_TASK_KERNEL with a TE kernel      -> TbeOpTask
//   - RT_MODEL_TASK_KERNEL with an AI_CPU kernel -> AiCpuCC task
//   - RT_MODEL_TASK_KERNEL_EX                    -> AiCpuTask (TF AICPU)
// Any other kernel type is an error; any other task type is logged and skipped.
// Ownership of the created tasks passes to single_op.tasks_.
Status SingleOpModel::BuildTaskList(SingleOp &single_op) {
  auto ge_model = model_helper_.GetGeModel();
  GE_CHECK_NOTNULL(ge_model);
  auto tasks = ge_model->GetModelTaskDefPtr()->task();
  for (int i = 0; i < tasks.size(); ++i) {
    const TaskDef &task_def = tasks[i];
    GELOGI("[%s] Task[%d], type = %u, DebugString = %s", model_name_.c_str(), i, task_def.type(),
           task_def.DebugString().c_str());
    auto task_type = static_cast<rtModelTaskType_t>(task_def.type());
    if (task_type == RT_MODEL_TASK_KERNEL) {
      const domi::KernelDef &kernel_def = task_def.kernel();
      const auto &context = kernel_def.context();
      auto kernel_type = static_cast<cce::ccKernelType>(context.kernel_type());
      if (kernel_type == cce::ccKernelType::TE) {
        GELOGD("Building TBE task");
        TbeOpTask *tbe_task = nullptr;
        auto ret = BuildKernelTask(task_def.kernel(), &tbe_task);
        if (ret != SUCCESS) {
          return ret;
        }
        // One arg-table slot per user-visible input/output; ParseArgTable
        // records which task-arg addresses belong to which slot.
        single_op.arg_table_.resize(single_op.input_sizes_.size() + single_op.output_sizes_.size());
        ParseArgTable(tbe_task, single_op);
        single_op.tasks_.emplace_back(tbe_task);
      } else if (kernel_type == cce::ccKernelType::AI_CPU) {
        GELOGD("Building AICPU_CC task");
        OpTask *task = nullptr;
        auto ret = BuildCpuKernelTask(task_def.kernel(), &task);
        if (ret != SUCCESS) {
          return ret;
        }
        single_op.tasks_.emplace_back(task);
      } else {
        GELOGE(UNSUPPORTED, "Only TBE kernel and AI_CPU kernel are supported, but got %u", context.kernel_type());
        return UNSUPPORTED;
      }
    } else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
      GELOGD("Building AICPU_TF task");
      AiCpuTask *aicpu_task = nullptr;
      bool depend_compute_flag = false;
      // Each AICPU_TF task gets a process-unique session id from the global
      // atomic counter declared at file scope.
      uint64_t singleop_sessionid = aicpu_sessionid++;
      GELOGI("Build singleOp, sessionId = %lu", singleop_sessionid);
      auto ret = BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, false, depend_compute_flag, singleop_sessionid);
      if (ret != SUCCESS) {
        return ret;
      }
      single_op.tasks_.emplace_back(aicpu_task);
      single_op.SetSessionID(singleop_sessionid);
    } else {
      // skip
      GELOGD("Skip task type: %d", static_cast<int>(task_type));
    }
  }
  return SUCCESS;
}
  244. void SingleOpModel::ParseArgTable(TbeOpTask *task, SingleOp &op) {
  245. if (task == nullptr) {
  246. GELOGE(PARAM_INVALID, "tbe op task is nullptr");
  247. return;
  248. }
  249. // args: addr1, addr2, addr3 ...
  250. auto *args = const_cast<uintptr_t *>(reinterpret_cast<const uintptr_t *>(task->GetArgs()));
  251. size_t arg_size = task->GetArgSize();
  252. for (size_t i = 0; i < arg_size / sizeof(void *); ++i) {
  253. uintptr_t *ptr_to_addr = args + i;
  254. uintptr_t addr = *ptr_to_addr;
  255. auto iter = model_params_.addr_mapping_.find(addr);
  256. if (iter != model_params_.addr_mapping_.end()) {
  257. int arg_index = iter->second;
  258. GELOGI("%s args[%zu] mapped to user designated args[%d]", task->GetStubName().c_str(), i, arg_index);
  259. op.arg_table_[iter->second].emplace_back(ptr_to_addr);
  260. }
  261. }
  262. }
  263. Status SingleOpModel::BuildKernelTask(const domi::KernelDef &kernel_def, TbeOpTask **task) {
  264. GE_CHECK_NOTNULL(task);
  265. const auto &context = kernel_def.context();
  266. auto iter = op_list_.find(context.op_index());
  267. if (iter == op_list_.end()) {
  268. GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
  269. return INTERNAL_ERROR;
  270. }
  271. auto *tbe_task = new (std::nothrow) TbeOpTask();
  272. if (tbe_task == nullptr) {
  273. GELOGE(MEMALLOC_FAILED, "create tbe op task failed");
  274. return MEMALLOC_FAILED;
  275. }
  276. auto builder = TbeTaskBuilder(model_name_, iter->second, kernel_def);
  277. auto ret = builder.BuildTask(*tbe_task, model_params_);
  278. if (ret != SUCCESS) {
  279. delete tbe_task;
  280. tbe_task = nullptr;
  281. return ret;
  282. }
  283. *task = tbe_task;
  284. return SUCCESS;
  285. }
  286. Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, bool dynamic_flag,
  287. bool &depend_compute_flag, uint64_t session_id) {
  288. auto iter = op_list_.find(kernel_def.op_index());
  289. if (iter == op_list_.end()) {
  290. GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", kernel_def.op_index());
  291. return INTERNAL_ERROR;
  292. }
  293. std::unique_ptr<AiCpuTask> aicpu_task(new (std::nothrow) AiCpuTask());
  294. if (aicpu_task == nullptr) {
  295. GELOGE(MEMALLOC_FAILED, "create aicpu_TF op task failed");
  296. return MEMALLOC_FAILED;
  297. }
  298. auto builder = AiCpuTaskBuilder(iter->second->GetOpDesc(), kernel_def);
  299. auto ret = builder.BuildTask(*aicpu_task, model_params_, dynamic_flag, session_id);
  300. if (ret != SUCCESS) {
  301. GELOGE(ret, "build aicpu_TF op task failed");
  302. return ret;
  303. }
  304. depend_compute_flag = (aicpu_task->GetUnknownType() == DEPEND_COMPUTE);
  305. *task = aicpu_task.release();
  306. return SUCCESS;
  307. }
  308. Status SingleOpModel::BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task) {
  309. const auto &context = kernel_def.context();
  310. auto iter = op_list_.find(context.op_index());
  311. if (iter == op_list_.end()) {
  312. GELOGE(INTERNAL_ERROR, "op desc not found. op index = %u", context.op_index());
  313. return INTERNAL_ERROR;
  314. }
  315. std::unique_ptr<AiCpuCCTask> aicpucc_task(new (std::nothrow) AiCpuCCTask());
  316. if (aicpucc_task == nullptr) {
  317. GELOGE(MEMALLOC_FAILED, "create aicpu_CC op task failed");
  318. return MEMALLOC_FAILED;
  319. }
  320. auto builder = AiCpuCCTaskBuilder(iter->second->GetOpDesc(), kernel_def);
  321. auto ret = builder.BuildTask(*aicpucc_task);
  322. if (ret != SUCCESS) {
  323. GELOGE(ret, "build aicpu_CC op task failed");
  324. return ret;
  325. }
  326. *task = aicpucc_task.release();
  327. return SUCCESS;
  328. }
  329. Status SingleOpModel::BuildOp(StreamResource &resource, SingleOp &single_op) {
  330. GE_CHK_STATUS_RET_NOLOG(ParseInputsAndOutputs());
  331. GE_CHK_STATUS_RET_NOLOG(InitModelMem(resource));
  332. GE_CHK_STATUS_RET_NOLOG(SetInputsAndOutputs(single_op));
  333. return BuildTaskList(single_op);
  334. }
  335. Status SingleOpModel::BuildModelTaskKernel(const TaskDef &task_def, DynamicSingleOp &single_op) {
  336. const domi::KernelDef &kernel_def = task_def.kernel();
  337. const auto &context = kernel_def.context();
  338. auto kernel_type = static_cast<cce::ccKernelType>(context.kernel_type());
  339. if (kernel_type == cce::ccKernelType::TE) {
  340. GELOGD("Building TBE task");
  341. TbeOpTask *tbe_task = nullptr;
  342. GE_CHK_STATUS_RET_NOLOG(BuildKernelTask(task_def.kernel(), &tbe_task));
  343. single_op.op_task_.reset(tbe_task);
  344. } else if (kernel_type == cce::ccKernelType::AI_CPU) {
  345. GELOGD("Building AICPU_CC task");
  346. OpTask *task = nullptr;
  347. GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task));
  348. single_op.op_task_.reset(task);
  349. } else {
  350. GELOGE(UNSUPPORTED, "Only TBE kernel and AI_CPU kernel are supported, but got %u", context.kernel_type());
  351. return UNSUPPORTED;
  352. }
  353. return SUCCESS;
  354. }
// Builds the task for a dynamic-shape single op. A dynamic op must map to
// exactly one kernel task (KERNEL or KERNEL_EX); a second one is an error.
// For DEPEND_COMPUTE AICPU_TF ops, the immediately following task def is
// consumed as the companion memcpy task (hence the ++i lookahead).
Status SingleOpModel::BuildTaskListForDynamicOp(DynamicSingleOp &single_op) {
  auto ge_model = model_helper_.GetGeModel();
  GE_CHECK_NOTNULL(ge_model);
  auto tasks = ge_model->GetModelTaskDefPtr()->task();
  for (int i = 0; i < tasks.size(); ++i) {
    const TaskDef &task_def = tasks[i];
    GELOGI("[%s] Task[%d], type = %u, DebugString = %s", model_name_.c_str(), i, task_def.type(),
           task_def.DebugString().c_str());
    auto task_type = static_cast<rtModelTaskType_t>(task_def.type());
    if (task_type == RT_MODEL_TASK_KERNEL) {
      // A task was already built -> more than one kernel task, unsupported.
      if (single_op.op_task_ != nullptr) {
        GELOGE(UNSUPPORTED, "Do not support dynamic op with multiple tasks.");
        return UNSUPPORTED;
      }
      GE_CHK_STATUS_RET_NOLOG(BuildModelTaskKernel(task_def, single_op));
    } else if (task_type == RT_MODEL_TASK_KERNEL_EX) {
      if (single_op.op_task_ != nullptr) {
        GELOGE(UNSUPPORTED, "Do not support dynamic op with multiple tasks.");
        return UNSUPPORTED;
      }
      GELOGD("Building AICPU_TF task");
      AiCpuTask *aicpu_task = nullptr;
      bool depend_compute_flag = false;
      // Process-unique session id from the file-scope atomic counter.
      uint64_t dynamic_singleop_sessionid = aicpu_sessionid++;
      GELOGI("Build dynamic singleOp, sessionId = %lu", dynamic_singleop_sessionid);
      GE_CHK_STATUS_RET_NOLOG(
          BuildKernelExTask(task_def.kernel_ex(), &aicpu_task, true, depend_compute_flag, dynamic_singleop_sessionid));
      if (depend_compute_flag) {
        // DEPEND_COMPUTE requires a companion copy task right after this one.
        if (i >= tasks.size() - 1) {
          GELOGE(FAILED, "The copy task of the fourth operator was not found.");
          return FAILED;
        }
        ++i;
        const TaskDef &copy_task_def = tasks[i];
        GE_CHK_STATUS_RET_NOLOG(aicpu_task->SetMemCopyTask(copy_task_def.kernel_ex()));
      }
      single_op.op_task_.reset(aicpu_task);
      single_op.SetSessionID(dynamic_singleop_sessionid);
    } else {
      // skip
      GELOGD("Skip task type: %d", static_cast<int>(task_type));
    }
  }
  return SUCCESS;
}
  400. Status SingleOpModel::BuildDynamicOp(DynamicSingleOp &single_op) {
  401. single_op.num_inputs_ = data_ops_.size();
  402. single_op.num_outputs_ = netoutput_op_->GetAllInputsSize();
  403. ParseOpModelParams(model_helper_, model_params_);
  404. return BuildTaskListForDynamicOp(single_op);
  405. }
  406. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示。