
execution_engine.cc

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "hybrid/executor/worker/execution_engine.h"
#include "graph/runtime_inference_context.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/tensor_adapter.h"
#include "graph/debug/ge_attr_define.h"
#include "hybrid/node_executor/node_executor.h"
#include "common/dump/dump_manager.h"
#include "common/dump/dump_op.h"
#include "common/types.h"
#include "common/ge_types.h"
#include "common/profiling/profiling_manager.h"
#include "runtime/base.h"
namespace ge {
namespace hybrid {
namespace {
constexpr int64_t kMaxPadding = 63;

Status LogInputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    const auto &tensor_desc = task_context.GetInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. input[%d] = %s, shape = [%s]",
           node_item.NodeName().c_str(),
           i,
           input_tensor->DebugString().c_str(),
           tensor_desc->GetShape().ToString().c_str());
  }
  return SUCCESS;
}

Status LogOutputs(const NodeItem &node_item, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumOutputs(); ++i) {
    const auto &output_tensor = task_context.GetOutput(i);
    GE_CHECK_NOTNULL(output_tensor);
    const auto &tensor_desc = node_item.MutableOutputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    GELOGD("[%s] Print task args. output[%d] = %s, shape = [%s]",
           node_item.NodeName().c_str(),
           i,
           output_tensor->DebugString().c_str(),
           tensor_desc->MutableShape().ToString().c_str());
  }
  return SUCCESS;
}
}  // namespace
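
// Callback invoked once a node's kernel has finished on device. It caches
// outputs needed as const inputs by downstream nodes, optionally dumps the
// node and reports profiling data, and handles output propagation and
// observer notification for the node.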
class NodeDoneCallback {
 public:
  NodeDoneCallback(GraphExecutionContext *graph_context, std::shared_ptr<TaskContext> task_context);
  ~NodeDoneCallback() = default;
  Status OnNodeDone();

 private:
  Status PrepareConstInputs(const NodeItem &node_item);
  Status DumpDynamicNode();
  Status ProfilingReport();
  Status GetGraphDescInfo(const NodePtr node, const HybridModel *model,
                          std::vector<ComputeGraphDescInfo> &compute_graph_info);
  Status GetTaskDescInfo(const NodePtr node, const HybridModel *model,
                         std::vector<TaskDescInfo> &task_desc_info);
  GraphExecutionContext *graph_context_;
  std::shared_ptr<TaskContext> context_;
  DumpOp dump_op_;
};

NodeDoneCallback::NodeDoneCallback(GraphExecutionContext *graph_context,
                                   std::shared_ptr<TaskContext> task_context)
    : graph_context_(graph_context), context_(std::move(task_context)) {
}
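
// Copies each output listed in to_const_output_id_list back to host memory and
// registers it in the session's RuntimeInferenceContext, so that downstream
// nodes that consume these values (e.g. for shape computation) can read them.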
Status NodeDoneCallback::PrepareConstInputs(const NodeItem &node_item) {
  for (auto output_idx : node_item.to_const_output_id_list) {
    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(),
                          "[PrepareConstInputs] [index = %d] Start",
                          output_idx);
    auto output_tensor = context_->GetOutput(output_idx);
    GE_CHECK_NOTNULL(output_tensor);

    Tensor tensor;
    auto ge_tensor_desc = node_item.MutableOutputDesc(output_idx);
    GE_CHECK_NOTNULL(ge_tensor_desc);
    tensor.SetTensorDesc(TensorAdapter::GeTensorDesc2TensorDesc(*ge_tensor_desc));

    int64_t tensor_size;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorSizeInBytes(*ge_tensor_desc, tensor_size),
                            "Failed to invoke GetTensorSizeInBytes");
    if (output_tensor->GetSize() < static_cast<size_t>(tensor_size)) {
      GELOGE(INTERNAL_ERROR,
             "[%s] Tensor size is not enough. output index = %d, required size = %ld, tensor = %s",
             node_item.NodeName().c_str(),
             output_idx,
             tensor_size,
             output_tensor->DebugString().c_str());
      return INTERNAL_ERROR;
    }

    vector<uint8_t> host_buffer(static_cast<unsigned long>(tensor_size));
    GELOGD("[%s] To cache output[%d] to host, size = %zu",
           node_item.NodeName().c_str(),
           output_idx,
           output_tensor->GetSize());
    if (tensor_size > 0) {
      GE_CHK_RT_RET(rtMemcpy(host_buffer.data(),
                             tensor_size,
                             output_tensor->GetData(),
                             tensor_size,
                             RT_MEMCPY_DEVICE_TO_HOST));
    }
    tensor.SetData(std::move(host_buffer));

    string session_id = std::to_string(context_->GetSessionId());
    RuntimeInferenceContext *runtime_infer_ctx = nullptr;
    GE_CHK_GRAPH_STATUS_RET(RuntimeInferenceContext::GetContext(session_id, &runtime_infer_ctx),
                            "Failed to get RuntimeInferenceContext, session_id = %s", session_id.c_str());
    GE_CHK_STATUS_RET(runtime_infer_ctx->SetTensor(node_item.node_id, output_idx, std::move(tensor)),
                      "Failed to SetTensor, node = %s, output_index = %d", node_item.NodeName().c_str(), output_idx);
    GELOGD("[%s] Output[%d] cached successfully in session: %s. node_id = %d, shape = [%s]",
           node_item.NodeName().c_str(),
           output_idx,
           session_id.c_str(),
           node_item.node_id,
           ge_tensor_desc->GetShape().ToString().c_str());
    RECORD_CALLBACK_EVENT(graph_context_, node_item.NodeName().c_str(),
                          "[PrepareConstInputs] [index = %d] End",
                          output_idx);
  }

  return SUCCESS;
}
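
// Collects per-task profiling information for the node: model name, op name,
// block_dim (taken from the first task def, if any), and the runtime task/stream ids.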
Status NodeDoneCallback::GetTaskDescInfo(const NodePtr node, const HybridModel *model,
                                         std::vector<TaskDescInfo> &task_desc_info) {
  GE_CHECK_NOTNULL(node);
  GE_CHECK_NOTNULL(model);

  GELOGD("GetTaskDescInfo of node [%s] start.", node->GetName().c_str());
  auto op_desc = node->GetOpDesc();
  std::string op_name = op_desc->GetName();
  std::string dynamic_model_name = model->GetModelName();
  uint32_t task_id = 0;
  uint32_t stream_id = 0;
  if (rtGetTaskIdAndStreamID(&task_id, &stream_id) != RT_ERROR_NONE) {
    GELOGE(PARAM_INVALID, "Get task_id and stream_id failed.");
    return PARAM_INVALID;
  }

  TaskDescInfo tmp_task_desc_info;
  tmp_task_desc_info.model_name = dynamic_model_name;
  tmp_task_desc_info.op_name = op_name;
  tmp_task_desc_info.block_dim = 0;
  auto task_defs = model->GetTaskDefs(node);
  if (task_defs != nullptr && (*task_defs).size() > 0) {
    const auto &task_def = (*task_defs)[0];
    tmp_task_desc_info.block_dim = task_def.kernel().block_dim();
  }
  tmp_task_desc_info.task_id = task_id;
  tmp_task_desc_info.stream_id = stream_id;
  GELOGD("GetTaskDescInfo of node [%s] end, task_id[%u], stream_id[%u]",
         node->GetName().c_str(), task_id, stream_id);
  task_desc_info.emplace_back(tmp_task_desc_info);
  return SUCCESS;
}
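
// Collects graph-level profiling information (format, shape and data type of
// every input and output) for ops whose imply type is TVM; other ops are skipped.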
Status NodeDoneCallback::GetGraphDescInfo(const NodePtr node, const HybridModel *model,
                                          std::vector<ComputeGraphDescInfo> &compute_graph_info) {
  GE_CHECK_NOTNULL(node);
  GE_CHECK_NOTNULL(model);

  GELOGD("GetComputeGraphInfo of node [%s] start.", node->GetName().c_str());
  std::string dynamic_model_name = model->GetModelName();
  auto op_desc = node->GetOpDesc();
  if (op_desc == nullptr) {
    GELOGE(PARAM_INVALID, "op_desc is nullptr.");
    return PARAM_INVALID;
  }

  auto op_mode = static_cast<uint32_t>(domi::ImplyType::INVALID);
  if (AttrUtils::GetInt(op_desc, ATTR_NAME_IMPLY_TYPE, op_mode) &&
      op_mode == static_cast<uint32_t>(domi::ImplyType::TVM)) {
    ComputeGraphDescInfo tmp_compute_graph_info;
    tmp_compute_graph_info.model_name = dynamic_model_name;
    tmp_compute_graph_info.op_name = op_desc->GetName();
    tmp_compute_graph_info.op_type = op_desc->GetType();

    for (size_t i = 0; i < op_desc->GetAllInputsSize(); ++i) {
      GeTensorDescPtr input_desc = op_desc->MutableInputDesc(i);
      if (input_desc == nullptr) {
        continue;
      }
      tmp_compute_graph_info.input_format.emplace_back(input_desc->GetFormat());
      tmp_compute_graph_info.input_shape.emplace_back(input_desc->GetShape().GetDims());
      tmp_compute_graph_info.input_data_type.emplace_back(input_desc->GetDataType());
    }

    for (size_t j = 0; j < op_desc->GetOutputsSize(); ++j) {
      GeTensorDesc output_desc = op_desc->GetOutputDesc(j);
      tmp_compute_graph_info.output_format.emplace_back(output_desc.GetFormat());
      tmp_compute_graph_info.output_shape.emplace_back(output_desc.GetShape().GetDims());
      tmp_compute_graph_info.output_data_type.emplace_back(output_desc.GetDataType());
    }
    compute_graph_info.emplace_back(tmp_compute_graph_info);
    GELOGD("GetComputeGraphInfo of node [%s] end.", node->GetName().c_str());
  }
  return SUCCESS;
}
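
// Gathers task and graph description info for the node and reports it to the
// ProfilingManager; PARTITIONEDCALL nodes are skipped.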
Status NodeDoneCallback::ProfilingReport() {
  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "Get node is nullptr");
    return PARAM_INVALID;
  }

  const auto &op_type = node->GetType();
  if (op_type == PARTITIONEDCALL) {
    return SUCCESS;
  }

  GE_CHECK_NOTNULL(graph_context_);
  const HybridModel *model = graph_context_->model;
  GE_CHECK_NOTNULL(model);

  GELOGD("ProfilingReport of node [%s] model [%s] start.", node->GetName().c_str(), model->GetModelName().c_str());
  std::vector<TaskDescInfo> task_desc_info;
  TaskDescInfo tmp_task_desc_info;
  auto profiling_ret = GetTaskDescInfo(node, model, task_desc_info);
  if (profiling_ret != RT_ERROR_NONE) {
    GELOGE(profiling_ret, "Get task info of node[%s] failed.", node->GetName().c_str());
    return profiling_ret;
  }

  std::vector<ComputeGraphDescInfo> compute_graph_info;
  profiling_ret = GetGraphDescInfo(node, model, compute_graph_info);
  if (profiling_ret != RT_ERROR_NONE) {
    GELOGE(profiling_ret, "Get graph info of node[%s] failed.", node->GetName().c_str());
    return profiling_ret;
  }

  auto &profiling_manager = ProfilingManager::Instance();
  profiling_manager.ReportProfilingData(model->GetModelId(), task_desc_info, compute_graph_info);
  return SUCCESS;
}
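
// Configures and launches a dump op for this node: records input/output device
// addresses, the dynamic model name/id and the flow-control loop variables,
// launches the dump and synchronizes the stream so the dump finishes before
// the callback returns.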
Status NodeDoneCallback::DumpDynamicNode() {
  auto node = context_->GetNodeItem().node;
  if (node == nullptr) {
    GELOGE(PARAM_INVALID, "Get node is nullptr");
    return PARAM_INVALID;
  }
  auto op_desc = node->GetOpDesc();
  auto stream = context_->GetStream();
  vector<uintptr_t> input_addrs;
  vector<uintptr_t> output_addrs;
  for (int i = 0; i < context_->NumInputs(); i++) {
    auto tensor_value = context_->GetInput(i);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "Tensor value is nullptr");
    uint64_t input_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    input_addrs.emplace_back(input_addr);
  }
  for (int j = 0; j < context_->NumOutputs(); j++) {
    auto tensor_value = context_->GetOutput(j);
    GE_CHK_BOOL_RET_STATUS(tensor_value != nullptr, PARAM_INVALID, "Tensor value is nullptr");
    uint64_t output_addr = reinterpret_cast<uintptr_t>(tensor_value->GetData());
    output_addrs.emplace_back(output_addr);
  }
  dump_op_.SetDumpInfo(context_->GetDumpProperties(), op_desc, input_addrs, output_addrs, stream);

  GE_CHECK_NOTNULL(graph_context_);
  const HybridModel *model = graph_context_->model;
  GE_CHECK_NOTNULL(model);
  std::string dynamic_model_name = model->GetModelName();
  uint32_t model_id = model->GetModelId();
  dump_op_.SetDynamicModelInfo(dynamic_model_name, model_id);

  void *global_step = nullptr;
  TensorValue *variable_global_step = context_->GetVariable(NODE_NAME_GLOBAL_STEP);
  if (variable_global_step != nullptr) {
    global_step = const_cast<void *>(variable_global_step->GetData());
  }

  void *loop_per_iter = nullptr;
  TensorValue *variable_loop_per_iter = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_PER_ITER);
  if (variable_loop_per_iter != nullptr) {
    loop_per_iter = const_cast<void *>(variable_loop_per_iter->GetData());
  }

  void *loop_cond = nullptr;
  TensorValue *variable_loop_cond = context_->GetVariable(NODE_NAME_FLOWCTRL_LOOP_COND);
  if (variable_loop_cond != nullptr) {
    loop_cond = const_cast<void *>(variable_loop_cond->GetData());
  }
  dump_op_.SetLoopAddr(global_step, loop_per_iter, loop_cond);

  GE_CHK_STATUS_RET(dump_op_.LaunchDumpOp(), "Failed to launch dump op in hybrid model");

  auto rt_ret = rtStreamSynchronize(stream);
  if (rt_ret != RT_ERROR_NONE) {
    GELOGE(rt_ret, "rtStreamSynchronize failed");
    return rt_ret;
  }
  return SUCCESS;
}
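
// Post-execution entry point: dumps the node if a dump path is configured,
// reports profiling data if profiling is on, releases inputs, caches const
// inputs, propagates outputs of DEPEND_COMPUTE nodes and notifies observers.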
Status NodeDoneCallback::OnNodeDone() {
  auto &node_item = context_->GetNodeItem();
  GELOGI("[%s] Start callback process.", node_item.NodeName().c_str());
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Compute] End");
  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] Start");

  auto dump_path = context_->GetDumpProperties().GetDumpPath();
  if (!dump_path.empty()) {
    GELOGI("Start to dump dynamic shape, dump_path is %s", dump_path.c_str());
    GE_CHK_STATUS_RET(DumpDynamicNode(), "Failed to dump dynamic node");
  }

  if (ProfilingManager::Instance().ProfilingModelExecuteOn()) {
    GE_CHK_STATUS_RET(ProfilingReport(), "Report node[%s] to profiling failed.",
                      node_item.NodeName().c_str());
  }

  // release inputs
  for (int i = 0; i < context_->NumInputs(); ++i) {
    context_->ReleaseInput(i);
  }

  GE_CHK_STATUS_RET_NOLOG(PrepareConstInputs(node_item));

  // PropagateOutputs for type == DEPEND_COMPUTE
  if (node_item.shape_inference_type == DEPEND_COMPUTE) {
    if (graph_context_->trace_enabled) {
      (void) LogOutputs(node_item, *context_);
    }
    GE_CHK_STATUS_RET(context_->PropagateOutputs(),
                      "[%s] Failed to propagate outputs",
                      node_item.NodeName().c_str());
    RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[PropagateOutputs] End");
  }

  // release condition variable
  if (node_item.has_observer) {
    GELOGI("[%s] Notify observer. node_id = %d", node_item.NodeName().c_str(), node_item.node_id);
    context_->NodeDone();
  }

  RECORD_CALLBACK_EVENT(graph_context_, context_->GetNodeName(), "[Callback] End");
  return SUCCESS;
}
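
// Schedules asynchronous execution of a ready node: builds the NodeDoneCallback,
// launches the kernel task, then propagates outputs to downstream nodes.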
Status ExecutionEngine::ExecuteAsync(NodeState &node_state,
                                     const std::shared_ptr<TaskContext> &task_context,
                                     GraphExecutionContext &execution_context) {
  GELOGI("[%s] Node is ready for execution", task_context->GetNodeName());
  RECORD_EXECUTION_EVENT(&execution_context, task_context->GetNodeName(), "Start");
  auto cb = std::shared_ptr<NodeDoneCallback>(new(std::nothrow) NodeDoneCallback(&execution_context, task_context));
  GE_CHECK_NOTNULL(cb);
  auto callback = [&, cb]() {
    auto ret = cb->OnNodeDone();
    if (ret != SUCCESS) {
      task_context->OnError(ret);
    }
  };

  GE_CHK_STATUS_RET_NOLOG(DoExecuteAsync(node_state, *task_context, execution_context, callback));
  GE_CHK_STATUS_RET_NOLOG(PropagateOutputs(*node_state.GetNodeItem(), *task_context, execution_context));
  return SUCCESS;
}
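
// Launches the node's kernel task: waits for dependent (DEPEND_COMPUTE) inputs,
// prepares the task through the node executor, validates input tensors, and
// finally executes the task with the completion callback attached.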
Status ExecutionEngine::DoExecuteAsync(NodeState &node_state,
                                       TaskContext &task_context,
                                       GraphExecutionContext &context,
                                       const std::function<void()> &callback) {
  const auto &task = node_state.GetKernelTask();
  if (task == nullptr) {
    GELOGE(INTERNAL_ERROR, "[%s] NodeTask is null.", node_state.GetName().c_str());
    return INTERNAL_ERROR;
  }

  // Wait for dependent nodes(DEPEND_COMPUTE), so that the input tensors are valid.
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[AwaitDependents] Start");
  GE_CHK_STATUS_RET(node_state.AwaitInputTensors(context),
                    "[%s] Failed to wait for dependent nodes.",
                    node_state.GetName().c_str());

  const auto &node_item = *node_state.GetNodeItem();
  auto executor = node_item.node_executor;
  GE_CHECK_NOTNULL(executor);
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] Start");
  GE_CHK_STATUS_RET(executor->PrepareTask(*task, task_context),
                    "[%s] Failed to prepare task",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PrepareTask] End");
  GELOGD("[%s] Done task preparation successfully.", node_state.GetName().c_str());

  if (context.trace_enabled) {
    LogInputs(node_item, task_context);
    if (node_item.shape_inference_type != DEPEND_COMPUTE) {
      LogOutputs(node_item, task_context);
    }
  }

  GE_CHK_STATUS_RET(ValidateInputTensors(node_state, task_context), "Failed to validate input tensors.");
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ValidateInputTensors] End");

  if (context.profiling_level > 0) {
    auto *ctx = &context;
    const string &name = node_state.GetName();
    (void)task_context.RegisterCallback([ctx, name]() {
      RECORD_CALLBACK_EVENT(ctx, name.c_str(), "[Compute] Start");
    });
  }
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] Start");
  GE_CHK_STATUS_RET(node_item.node_executor->ExecuteTask(*task, task_context, callback),
                    "[%s] Failed to execute task",
                    node_state.GetName().c_str());
  RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[ExecuteTask] End");
  GELOGD("[%s] Done task launch successfully.", node_state.GetName().c_str());
  return SUCCESS;
}
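
// Checks that every non-empty, non-string input tensor is at least as large as
// the size implied by its tensor descriptor; a shortfall of up to kMaxPadding
// bytes only triggers a warning, anything larger is treated as an error.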
Status ExecutionEngine::ValidateInputTensors(const NodeState &node_state, const TaskContext &task_context) {
  for (auto i = 0; i < task_context.NumInputs(); ++i) {
    const auto &input_tensor = task_context.GetInput(i);
    GE_CHECK_NOTNULL(input_tensor);
    if (input_tensor->GetData() == nullptr) {
      GELOGD("[%s] Skipping null input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    const auto &tensor_desc = task_context.MutableInputDesc(i);
    GE_CHECK_NOTNULL(tensor_desc);
    if (tensor_desc->GetDataType() == DT_STRING) {
      GELOGD("[%s] Skipping DT_STRING input, index = %d", task_context.GetNodeName(), i);
      continue;
    }

    int64_t expected_size;
    GE_CHK_GRAPH_STATUS_RET(TensorUtils::GetTensorMemorySizeInBytes(*tensor_desc, expected_size));
    GELOGD("[%s] Input[%d] expects [%ld] bytes.", task_context.GetNodeName(), i, expected_size);
    auto size_diff = expected_size - static_cast<int64_t>(input_tensor->GetSize());
    if (size_diff > 0) {
      if (size_diff <= kMaxPadding) {
        GELOGW("[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu",
               task_context.GetNodeName(),
               i,
               expected_size,
               input_tensor->GetSize());
      } else {
        GELOGE(INTERNAL_ERROR,
               "[%s] Input[%d]: tensor size mismatches. expected: %ld, but given %zu",
               task_context.GetNodeName(),
               i,
               expected_size,
               input_tensor->GetSize());
        return INTERNAL_ERROR;
      }
    }
  }

  return SUCCESS;
}
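
// Propagates the node's outputs to downstream nodes right after launch, except
// for DEPEND_COMPUTE nodes, whose outputs are only propagated from the callback
// once their real shapes are known.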
Status ExecutionEngine::PropagateOutputs(const NodeItem &node_item,
                                         TaskContext &task_context,
                                         GraphExecutionContext &context) {
  if (node_item.shape_inference_type != DEPEND_COMPUTE) {
    GE_CHK_STATUS_RET(task_context.PropagateOutputs(),
                      "[%s] Failed to propagate outputs.",
                      node_item.NodeName().c_str());
    RECORD_EXECUTION_EVENT(&context, task_context.GetNodeName(), "[PropagateOutputs] End");
    GELOGD("[%s] Done propagating outputs successfully.", node_item.NodeName().c_str());
  }
  return SUCCESS;
}
}  // namespace hybrid
}  // namespace ge

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module (ME) and the underlying hardware and acts as a bridge between the two. GE takes the graph issued by ME as input, performs a series of deep graph optimizations, and outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its computing power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core.