
subgraph_context.cc

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "subgraph_context.h"

#include "common/debug/log.h"

namespace ge {
namespace hybrid {
SubgraphContext::SubgraphContext(const GraphItem *graph_item) : graph_item_(graph_item) {
}

Status SubgraphContext::Init() {
  GE_CHECK_NOTNULL(graph_item_);
  GELOGD("[%s] Start to init subgraph context. total inputs = %d, total outputs = %d",
         graph_item_->GetName().c_str(),
         graph_item_->TotalInputs(),
         graph_item_->TotalOutputs());
  all_inputs_.resize(static_cast<unsigned long>(graph_item_->TotalInputs()));
  all_outputs_.resize(static_cast<unsigned long>(graph_item_->TotalOutputs()));

  return SUCCESS;
}

// Lazily creates and caches the NodeState for a node; mu_ guards the map against concurrent callers.
NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
  std::lock_guard<std::mutex> lk(mu_);
  auto &node_state = node_states_[node_item];
  if (node_state == nullptr) {
    node_state.reset(new(std::nothrow) NodeState(*node_item, this));
  }

  return node_state;
}

Status SubgraphContext::SetInput(int index, const TensorValue &tensor) {
  if (static_cast<size_t>(index) >= all_inputs_.size()) {
    GELOGE(INTERNAL_ERROR,
           "input index out of range. all input num = %zu, input index = %d",
           all_inputs_.size(),
           index);
    return INTERNAL_ERROR;
  }
  all_inputs_[index] = tensor;
  return SUCCESS;
}

// Maps a node-local input index onto the flat input list shared by the whole subgraph.
Status SubgraphContext::SetInput(const NodeItem &node_item, int input_index, const TensorValue &tensor) {
  auto index = node_item.input_start + input_index;
  return SetInput(index, tensor);
}

Status SubgraphContext::SetOutput(const NodeItem &node_item, int output_index, const TensorValue &tensor) {
  auto index = node_item.output_start + output_index;
  if ((output_index >= node_item.num_outputs) || (static_cast<size_t>(index) >= all_outputs_.size())) {
    GELOGE(INTERNAL_ERROR,
           "output index out of range. all output num = %zu, node_item = %s, output index = %d",
           all_outputs_.size(),
           node_item.DebugString().c_str(),
           output_index);
    return INTERNAL_ERROR;
  }

  all_outputs_[index] = tensor;
  return SUCCESS;
}

Status SubgraphContext::GetInput(int index, TensorValue &tensor) {
  GE_CHECK_GE(all_inputs_.size(), index + 1U);
  tensor = all_inputs_[index];
  return SUCCESS;
}

Status SubgraphContext::GetOutputs(std::vector<TensorValue> &outputs) {
  if (graph_item_->IsDynamic()) {
    GELOGD("[%s] graph is dynamic, get outputs from net output input tensors", graph_item_->GetName().c_str());
    // get from net output inputs
    auto output_node = graph_item_->GetOutputNode();
    if (output_node != nullptr) {
      for (int i = 0; i < output_node->num_inputs; ++i) {
        TensorValue tensor;
        GE_CHK_STATUS_RET_NOLOG(GetInput(output_node->input_start + i, tensor));
        GELOGD("[%s] Adding output tensor by input index [%d], tensor = %s",
               graph_item_->GetName().c_str(),
               output_node->input_start + i,
               tensor.DebugString().c_str());
        outputs.emplace_back(std::move(tensor));
      }
    }
  } else {
    GELOGD("[%s] graph is non-dynamic, get outputs from subgraph outputs", graph_item_->GetName().c_str());
    for (auto &tensor : all_outputs_) {
      GELOGD("[%s] Adding output tensor: %s", graph_item_->GetName().c_str(), tensor.DebugString().c_str());
      outputs.emplace_back(tensor);
    }
  }

  return SUCCESS;
}

bool SubgraphContext::Await(const NodePtr &node) {
  return node_done_manager_.Await(node);
}

void SubgraphContext::OnError(Status error) {
  GELOGE(error, "[%s] Error occurred while executing graph.", graph_item_->GetName().c_str());
  node_done_manager_.Destroy();
}

void SubgraphContext::NodeDone(const NodePtr &node) {
  node_done_manager_.NodeDone(node);
}
}  // namespace hybrid
}  // namespace ge
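
SubgraphContext is the per-subgraph store that the hybrid executor uses for input/output tensors and per-node execution state. The snippet below is a minimal usage sketch, not code from the repository: the helper RunSubgraphOnce is hypothetical, it assumes a GraphItem and NodeItem have already been built elsewhere, and it only exercises the methods defined in this file.

// Hypothetical usage sketch of SubgraphContext (not part of the repository).
// Assumes `graph_item` and `node_item` were produced by the hybrid model builder.
Status RunSubgraphOnce(const GraphItem *graph_item, const NodeItem *node_item,
                       const TensorValue &input_tensor) {
  SubgraphContext subgraph_context(graph_item);
  GE_CHK_STATUS_RET(subgraph_context.Init(), "Failed to init subgraph context");

  // Feed the node's first input; the index is relative to node_item->input_start.
  GE_CHK_STATUS_RET(subgraph_context.SetInput(*node_item, 0, input_tensor), "Failed to set input");

  // Per-node execution state is created lazily and cached.
  NodeStatePtr node_state = subgraph_context.GetOrCreateNodeState(node_item);
  GE_CHECK_NOTNULL(node_state);

  // ... the executor would run the node here, then mark it done ...
  subgraph_context.NodeDone(node_item->node);

  // Collect the subgraph outputs once execution has finished.
  std::vector<TensorValue> outputs;
  GE_CHK_STATUS_RET(subgraph_context.GetOutputs(outputs), "Failed to get outputs");
  return SUCCESS;
}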

The Graph Engine (GE) module is a sub-module of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and acts as a bridge between them. GE takes the graph issued by ME as input, performs a series of deep graph-optimization operations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
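
For a sense of where the GE API layer fits, the sketch below shows a minimal offline-style call sequence: initialize GE, create a session, add a graph, and run it. This is an assumption-laden illustration, not code from this repository; the header path, option keys, and exact signatures (GEInitialize, Session::AddGraph, Session::RunGraph, GEFinalize) may differ between GE versions, and in normal MindSpore training/inference these calls are made by the framework rather than by the user.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // assumed public GE API header

int main() {
  std::map<std::string, std::string> options;  // session/init options; keys are deployment-specific
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    ge::Session session(options);
    ge::Graph graph("demo_graph");  // normally constructed or imported by the front end (ME)
    uint32_t graph_id = 0U;
    if (session.AddGraph(graph_id, graph) == ge::SUCCESS) {
      std::vector<ge::Tensor> inputs;
      std::vector<ge::Tensor> outputs;
      // GE Core optimizes the graph and executes it on the Ascend device.
      (void)session.RunGraph(graph_id, inputs, outputs);
    }
  }
  (void)ge::GEFinalize();
  return 0;
}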