
execution_engine.cc 20 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hybrid/executor/worker/execution_engine.h"
#include "graph/runtime_inference_context.h"
#include "graph/load/model_manager/model_manager.h"
#include "hybrid/node_executor/node_executor.h"
#include "hybrid/executor/worker/shape_inference_engine.h"
#include "common/profiling/profiling_manager.h"

namespace ge {
namespace hybrid {
namespace {
constexpr int64_t kMaxPadding = 63;

Status LogInputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    const auto &tensor_desc = task_context.GetInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. input[%d] = %s, shape = [%s]",
           node_item.NodeName().c_str(),
           i,
           input_tensor->DebugString().c_str(),
           tensor_desc->GetShape().ToString().c_str());
  }
  return SUCCESS;
}

Status LogOutputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumOutputs(); ++i) {
    const auto &output_tensor = task_context.GetOutput(i);
    GE_CHECK_NOTNULL(output_tensor);
    const auto &tensor_desc = node_item.MutableOutputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. output[%d] = %s, shape = [%s]",
           node_item.NodeName().c_str(),
           i,
           output_tensor->DebugString().c_str(),
           tensor_desc->MutableShape().ToString().c_str());
  }
  return SUCCESS;
}
}  // namespace

NodeDoneCallback::NodeDoneCallback(GraphExecutionContext *graph_context,
                                   std::shared_ptr<TaskContext> task_context)
    : graph_context_(graph_context), context_(std::move(task_context)) {
}

Status NodeDoneCallback::PrepareConstInputs(const NodeItem &node_item) {
  for (auto output_idx : node_item.to_const_output_id_list) {
    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(),
                          "[PrepareConstInputs] [index = %d] Start",
                          output_idx);

    auto output_tensor = context_->GetOutput(output_idx);
    GE_CHECK_NOTNULL(output_tensor);

    Tensor tensor;
    auto ge_tensor_desc = node_item.MutableOutputDesc(output_idx);
    GE_CHECK_NOTNULL(ge_tensor_desc);
    tensor.SetTensorDesc(TensorAdapter::GeTensorDesc2TensorDesc(*ge_tensor_desc));

    int64_t tensor_size;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorSizeInBytes(*ge_tensor_desc, tensor_size),
                            "Failed to invoke GetTensorSizeInBytes");

    if (output_tensor->GetSize() < static_cast<size_t>(tensor_size)) {
      GELOGE(INTERNAL_ERROR,
             "[Check][Size][%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s.",
             node_item.NodeName().c_str(), output_idx, tensor_size,
             output_tensor->DebugString().c_str());
      REPORT_INNER_ERROR("E19999",
                         "[%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s.",
                         node_item.NodeName().c_str(), output_idx, tensor_size,
                         output_tensor->DebugString().c_str());
      return INTERNAL_ERROR;
    }

    vector<uint8_t> host_buffer(static_cast<unsigned long>(tensor_size));
    GELOGD("[%s] To cache output[%d] to host, size = %zu",
           node_item.NodeName().c_str(),
           output_idx,
           output_tensor->GetSize());
    if (tensor_size > 0) {
      GE_CHK_RT_RET(rtMemcpy(host_buffer.data(),
                             tensor_size,
                             output_tensor->GetData(),
                             tensor_size,
                             RT_MEMCPY_DEVICE_TO_HOST));
    }
    tensor.SetData(std::move(host_buffer));
    string context_id = std::to_string(graph_context_->context_id);
    RuntimeInferenceContext *runtime_infer_ctx = nullptr;
    GE_CHK_GRAPH_STATUS_RET(RuntimeInferenceContext::GetContext(context_id, &runtime_infer_ctx),
                            "Failed to get RuntimeInferenceContext, context_id = %s", context_id.c_str());
    GE_CHK_STATUS_RET(runtime_infer_ctx->SetTensor(node_item.node_id, output_idx, std::move(tensor)),
                      "[Set][Tensor] Failed, node = %s, output_index = %d", node_item.NodeName().c_str(), output_idx);
    GELOGD("[%s] Output[%d] cached successfully in context: %s. node_id = %d, shape = [%s]",
           node_item.NodeName().c_str(),
           output_idx,
           context_id.c_str(),
           node_item.node_id,
           ge_tensor_desc->GetShape().ToString().c_str());

    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(),
                          "[PrepareConstInputs] [index = %d] End",
                          output_idx);
  }

  return SUCCESS;
}

Status NodeDoneCallback::GetTaskDescInfo(const NodePtr node, const HybridModel *model,
                                         std::vector<TaskDescInfo> &task_desc_info) {
  GE_CHECK_NOTNULL(node);
  GE_CHECK_NOTNULL(model);

  // only report aicpu and aicore node
  bool is_profiling_report = context_->GetNodeItem().is_profiling_report;
  if (!is_profiling_report) {
    GELOGD("Node[%s] is not aicore or aicpu, and no need to report data.", node->GetName().c_str());
    return SUCCESS;
  }

  GELOGD("GetTaskDescInfo of node [%s] start.", node->GetName().c_str());
  auto &prof_mgr = ProfilingManager::Instance();
  task_desc_info = context_->GetProfilingTaskDescInfo();
  context_->ClearProfilingTaskDescInfo();
  for (auto &tmp_task_desc : task_desc_info) {
    // save op input and output info
    auto op_desc = node->GetOpDesc();
    GE_CHECK_NOTNULL(op_desc);
    prof_mgr.GetOpInputOutputInfo(op_desc, tmp_task_desc);
  }

  return SUCCESS;
}

Status NodeDoneCallback::ProfilingReport() {
  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr.");
    REPORT_INNER_ERROR("E19999", "TaskContext GetNodeItem value is nullptr.");
    return PARAM_INVALID;
  }

  const auto &op_type = node->GetType();
  if (op_type == PARTITIONEDCALL) {
    return SUCCESS;
  }

  GE_CHECK_NOTNULL(graph_context_);
  const HybridModel *model = graph_context_->model;
  GE_CHECK_NOTNULL(model);

  GELOGD("ProfilingReport of node [%s] model [%s] start.", node->GetName().c_str(), model->GetModelName().c_str());
  std::vector<TaskDescInfo> task_desc_info;
  auto profiling_ret = GetTaskDescInfo(node, model, task_desc_info);
  if (profiling_ret != RT_ERROR_NONE) {
    GELOGE(profiling_ret, "[Get][TaskDescInfo] of node:%s failed.", node->GetName().c_str());
    REPORT_CALL_ERROR("E19999", "GetTaskDescInfo of node:%s failed.", node->GetName().c_str());
    return profiling_ret;
  }

  auto &profiling_manager = ProfilingManager::Instance();
  profiling_manager.ReportProfilingData(model->GetModelId(), task_desc_info);
  return SUCCESS;
}

Status NodeDoneCallback::DumpDynamicNode() {
  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "[Get][Node] value is nullptr.");
    REPORT_INNER_ERROR("E19999", "get node value is nullptr.");
    return PARAM_INVALID;
  }
  auto op_desc = node->GetOpDesc();
  GE_CHECK_NOTNULL(graph_context_);
  const HybridModel *model = graph_context_->model;
  GE_CHECK_NOTNULL(model);
  std::string dynamic_model_name = model->GetModelName();
  std::string dynamic_om_name = model->GetOmName();
  uint32_t model_id = model->GetModelId();
  if (!context_->GetDumpProperties().IsLayerNeedDump(dynamic_model_name, dynamic_om_name, op_desc->GetName())) {
    GELOGI("[%s] is not in dump list, no need dump", op_desc->GetName().c_str());
    return SUCCESS;
  }
  dump_op_.SetDynamicModelInfo(dynamic_model_name, dynamic_om_name, model_id);

  auto stream = context_->GetStream();
  vector<uintptr_t> input_addrs;
  vector<uintptr_t> output_addrs;
  for (int i = 0; i < context_->NumInputs(); i++) {
    auto tensor_value = context_->GetInput(i);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Get][Tensor] value is nullptr.");
    uintptr_t input_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    input_addrs.emplace_back(input_addr);
  }
  for (int j = 0; j < context_->NumOutputs(); j++) {
    auto tensor_value = context_->GetOutput(j);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Get][Tensor] value is nullptr.");
    uintptr_t output_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    output_addrs.emplace_back(output_addr);
  }
  dump_op_.SetDumpInfo(context_->GetDumpProperties(), op_desc, input_addrs, output_addrs, stream);

  void *loop_per_iter = nullptr;
  TensorValue *varible_loop_per_iter = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_PER_ITER);
  if (varible_loop_per_iter != nullptr) {
    loop_per_iter = const_cast<void *>(varible_loop_per_iter->GetData());
  }

  void *loop_cond = nullptr;
  TensorValue *varible_loop_cond = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_COND);
  if (varible_loop_cond != nullptr) {
    loop_cond = const_cast<void *>(varible_loop_cond->GetData());
  }
  void *global_step = context_->GetExecutionContext()->global_step;
  dump_op_.SetLoopAddr(global_step, loop_per_iter, loop_cond);

  GE_CHK_STATUS_RET(dump_op_.LaunchDumpOp(), "[Launch][DumpOp] failed in hybrid model.");

  auto rt_ret = rtStreamSynchronize(stream);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(rt_ret, "[Call][rtStreamSynchronize] failed, ret = %d.", rt_ret);
    REPORT_CALL_ERROR("E19999", "call rtStreamSynchronize failed, ret = %d.", rt_ret);
    return rt_ret;
  }
  return SUCCESS;
}

Status NodeDoneCallback::SaveDumpOpInfo() {
  GE_CHECK_NOTNULL(graph_context_);
  GE_CHECK_NOTNULL(graph_context_->model);

  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "[Save][DumpOpInfo] Get node is nullptr.");
    return PARAM_INVALID;
  }
  auto op_desc = node->GetOpDesc();
  GE_CHECK_NOTNULL(op_desc);

  vector<void *> input_addrs;
  vector<void *> output_addrs;
  for (int i = 0; i < context_->NumInputs(); i++) {
    auto tensor_value = context_->GetInput(i);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Save][DumpOpInfo] Tensor value is nullptr.");
    void *input_addr = const_cast<void *>(tensor_value->GetData());
    input_addrs.emplace_back(input_addr);
  }
  for (int j = 0; j < context_->NumOutputs(); j++) {
    auto tensor_value = context_->GetOutput(j);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "[Save][DumpOpInfo] Tensor value is nullptr.");
    void *output_addr = const_cast<void *>(tensor_value->GetData());
    output_addrs.emplace_back(output_addr);
  }

  uint32_t stream_id = context_->GetStreamId();
  uint32_t task_id = context_->GetTaskId();
  graph_context_->exception_dumper.SaveDumpOpInfo(op_desc, task_id, stream_id, input_addrs, output_addrs);

  return SUCCESS;
}

Status NodeDoneCallback::OnNodeDone() {
  auto &node_item = context_->GetNodeItem();
  GELOGI("[%s] Start callback process.", node_item.NodeName().c_str());
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Compute] End");
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] Start");

  const DumpProperties &dump_properties = context_->GetDumpProperties();
  if (dump_properties.IsDumpOpen() || context_->IsOverFlow()) {
    GELOGI("Start to dump dynamic shape op");
    GE_CHK_STATUS_RET(DumpDynamicNode(), "[Call][DumpDynamicNode] Failed.");
  }

  auto model_manager = ModelManager::GetInstance();
  GE_CHECK_NOTNULL(model_manager);
  if (model_manager->IsDumpExceptionOpen()) {
    GE_CHK_STATUS_RET(SaveDumpOpInfo(), "[Save][DumpOpInfo] Failed to dump op info.");
  }

  if (ProfilingManager::Instance().ProfilingModelLoadOn()) {
    GE_CHK_STATUS_RET(ProfilingReport(), "[Report][Profiling] of node[%s] failed.", node_item.NodeName().c_str());
  }

  // release workspace
  context_->ReleaseWorkspace();

  // release inputs
  for (int i = 0; i < context_->NumInputs(); ++i) {
    context_->ReleaseInput(i);
  }

  GE_CHK_STATUS_RET_NOLOG(PrepareConstInputs(node_item));

  if (node_item.shape_inference_type == DEPEND_SHAPE_RANGE || node_item.shape_inference_type == DEPEND_COMPUTE) {
    // update output tensor sizes
    const auto &guard = node_item.MutexGuard("OnNodeDone");
    GE_CHK_STATUS_RET_NOLOG(ShapeInferenceEngine::CalcOutputTensorSizes(node_item));
    GE_CHK_STATUS_RET_NOLOG(context_->GetNodeState()->GetShapeInferenceState().UpdateOutputDesc());
    (void)guard;
  }

  // PropagateOutputs for type == DEPEND_COMPUTE
  if (node_item.shape_inference_type == DEPEND_COMPUTE) {
    if (graph_context_->trace_enabled) {
      (void)LogOutputs(node_item, *context_);
    }
    GE_CHK_STATUS_RET(context_->PropagateOutputs(), "[Propagate][Outputs] of [%s] failed.",
                      node_item.NodeName().c_str());
    RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[PropagateOutputs] End");
  }

  // release condition variable
  if (node_item.has_observer) {
    GELOGI("[%s] Notify observer. node_id = %d", node_item.NodeName().c_str(), node_item.node_id);
    context_->NodeDone();
  }

  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] End");
  return SUCCESS;
}

Status ExecutionEngine::ExecuteAsync(NodeState &node_state,
                                     const std::shared_ptr<TaskContext> &task_context,
                                     GraphExecutionContext &execution_context,
                                     const std::function<void()> &callback) {
  GELOGI("[%s] Node is ready for execution", task_context->GetNodeName());
  RECORD_EXECUTION_EVENT(&execution_context, task_context->GetNodeName(), "Start");
  GE_CHK_STATUS_RET_NOLOG(DoExecuteAsync(node_state, *task_context, execution_context, callback));
  GE_CHK_STATUS_RET_NOLOG(PropagateOutputs(*node_state.GetNodeItem(), *task_context, execution_context));
  return SUCCESS;
}

Status ExecutionEngine::DoExecuteAsync(NodeState &node_state,
                                       TaskContext &task_context,
                                       GraphExecutionContext &context,
                                       const std::function<void()> &callback) {
  const auto &task = node_state.GetKernelTask();
  if (task == nullptr) {
    GELOGE(INTERNAL_ERROR, "[Get][KernelTask] of [%s] is null.", node_state.GetName().c_str());
    REPORT_INNER_ERROR("E19999", "GetKernelTask of %s failed.", node_state.GetName().c_str());
    return INTERNAL_ERROR;
  }

  // Wait for dependent nodes (DEPEND_COMPUTE), so that the input tensors are valid.
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[AwaitDependents] Start");
  HYBRID_CHK_STATUS_RET(node_state.AwaitInputTensors(context),
                        "[%s] Failed to wait for dependent nodes.",
                        node_state.GetName().c_str());

  const auto &node_item = *node_state.GetNodeItem();
  auto executor = node_item.node_executor;
  GE_CHECK_NOTNULL(executor);
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start");
  node_state.UpdatePersistTensor();
  GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context), "[Prepare][Task] for [%s] failed.",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End");
  GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str());

  if (context.trace_enabled) {
    LogInputs(node_item, task_context);
    if (node_item.shape_inference_type != DEPEND_COMPUTE) {
      LogOutputs(node_item, task_context);
    }
  }

  GE_CHK_STATUS_RET(ValidateInputTensors(node_state, task_context), "[Validate][InputTensors] for %s failed.",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ValidateInputTensors] End");

  if (GraphExecutionContext::profiling_level > 0) {
    auto *ctx = &context;
    const string &name = node_state.GetName();
    (void)task_context.RegisterCallback([ctx, name]() {
      RECORD_CALLBACK_EVENT(ctx, name.c_str(), "[Compute] Start");
    });
  }
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] Start");
  HYBRID_CHK_STATUS_RET(node_item.node_executor->ExecuteTask(*task, task_context, callback),
                        "[%s] Failed to execute task",
                        node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] End");

  GELOGD("[%s] Done task launch successfully.", node_state.GetName().c_str());
  return SUCCESS;
}

Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    if (input_tensor->GetData() == nullptr) {
      GELOGD("[%s] Skipping null input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    const auto &tensor_desc = task_context.MutableInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    if (tensor_desc->GetDataType() == DT_STRING) {
      GELOGD("[%s] Skipping DT_STRING input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    int64_t expected_size = 0;
    (void)TensorUtils::GetSize(*tensor_desc, expected_size);
    GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size);
    auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize());
    if (size_diff > 0) {
      if (size_diff <= kMaxPadding) {
        GELOGW("[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu",
               task_context.GetNodeName(),
               i,
               expected_size,
               input_tensor->GetSize());
      } else {
        GELOGE(INTERNAL_ERROR,
               "[Check][Size] for [%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu.",
               task_context.GetNodeName(), i, expected_size, input_tensor->GetSize());
        REPORT_INNER_ERROR("E19999", "[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu.",
                           task_context.GetNodeName(), i, expected_size, input_tensor->GetSize());
        return INTERNAL_ERROR;
      }
    }
  }

  return SUCCESS;
}

Status ExecutionEngine::PropagateOutputs(const NodeItem &node_item,
                                         TaskContext &task_context,
                                         GraphExecutionContext &context) {
  if (node_item.shape_inference_type != DEPEND_COMPUTE) {
    GE_CHK_STATUS_RET(task_context.PropagateOutputs(), "[Propagate][Outputs] for [%s] failed.",
                      node_item.NodeName().c_str());
    RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PropagateOutputs] End");
    GELOGD("[%s] Done propagating outputs successfully.", node_item.NodeName().c_str());
  }
  return SUCCESS;
}
}  // namespace hybrid
}  // namespace ge

The Graph Engine (GE) module is a sub-module of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware, serving as the bridge between them. GE takes the graph delivered by ME as input, performs a series of deep graph optimizations on it, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training/inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
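As a rough illustration of the GE API side (a minimal sketch only, not code from this repository: the option keys, graph construction, and exact overloads vary across CANN/GE releases, and the graph id and helper name below are hypothetical), a client typically initializes GE, creates a Session, registers a Graph, and runs it:

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // GE client API: GEInitialize/GEFinalize and ge::Session

// Hypothetical helper: run a prebuilt ge::Graph once and collect its outputs.
int RunGraphOnce(const ge::Graph &graph, const std::vector<ge::Tensor> &inputs) {
  std::map<std::string, std::string> options;  // initialization options (keys omitted, version dependent)
  if (ge::GEInitialize(options) != ge::SUCCESS) {  // bring up GE Core
    return -1;
  }
  {
    ge::Session session(options);   // a session owns the graphs added to it
    uint32_t graph_id = 0U;         // caller-chosen graph id (hypothetical)
    std::vector<ge::Tensor> outputs;
    if (session.AddGraph(graph_id, graph) != ge::SUCCESS ||       // hand the ME-built graph to GE
        session.RunGraph(graph_id, inputs, outputs) != ge::SUCCESS) {  // compile and execute on the device
      ge::GEFinalize();
      return -1;
    }
  }
  ge::GEFinalize();  // tear down GE Core after the session is destroyed
  return 0;
}

In this flow, the graph optimization and execution described above (including hybrid execution paths such as the one implemented in execution_engine.cc) happen inside GE Core behind the Session calls, which is why users normally do not interact with GE directly.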