
subgraph_context.cc 8.9 kB

/**
 * Copyright 2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "subgraph_context.h"

#include "hybrid/executor/hybrid_model_executor.h"

namespace ge {
namespace hybrid {
SubgraphContext::SubgraphContext(const GraphItem *graph_item, GraphExecutionContext *execution_context)
    : graph_item_(graph_item), execution_context_(execution_context) {
}

SubgraphContext::~SubgraphContext() {
  if (mmRWLockDestroy(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "Destroy rw_lock failed");
    GELOGE(INTERNAL_ERROR, "[RWLock][Destroy] Destroy rw_lock failed");
  }
}

Status SubgraphContext::Init() {
  GE_CHECK_NOTNULL(graph_item_);
  GELOGD("[%s] Start to init subgraph context. total inputs = %d, total outputs = %d",
         graph_item_->GetName().c_str(),
         graph_item_->TotalInputs(),
         graph_item_->TotalOutputs());
  all_inputs_.resize(static_cast<unsigned long>(graph_item_->TotalInputs()));
  all_outputs_.resize(static_cast<unsigned long>(graph_item_->TotalOutputs()));
  if (mmRWLockInit(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "Init rw_lock failed");
    GELOGE(INTERNAL_ERROR, "[RWLock][Init] Init rw_lock failed");
    return INTERNAL_ERROR;
  }
  return SUCCESS;
}

void SubgraphContext::SetGroup(int group) {
  group_ = group;
}

void SubgraphContext::ResetContext(const NodePtr &node) {
  node_done_manager_.Reset(node);
}
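
// GetOrCreateNodeState and CreateNodeState below together implement a
// double-checked lookup over rw_lock_: the read lock covers the common
// cache-hit path, while CreateNodeState takes the write lock and re-checks
// for nullptr, since another thread may have created the state between the
// read unlock and the write lock.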
NodeStatePtr SubgraphContext::GetOrCreateNodeState(const NodeItem *node_item) {
  GELOGD("[%s] lock for read", node_item->NodeName().c_str());
  if (mmRWLockRDLock(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "[Node:%s] Lock for read failed", node_item->NodeName().c_str());
    GELOGE(INTERNAL_ERROR, "[RWLock][Lock][Node:%s] Lock for read failed", node_item->NodeName().c_str());
    return nullptr;
  }
  const auto &iter = node_states_.find(node_item);
  if (iter != node_states_.end()) {
    auto state = iter->second;
    GELOGD("[%s] unlock for read", node_item->NodeName().c_str());
    if (mmRDLockUnLock(&rw_lock_) != EN_OK) {
      REPORT_CALL_ERROR("E19999", "[Node:%s] Unlock for read failed", node_item->NodeName().c_str());
      GELOGE(INTERNAL_ERROR, "[RWLock][Unlock][Node:%s] Unlock for read failed", node_item->NodeName().c_str());
      return nullptr;
    }
    return state;
  }
  GELOGD("[%s] unlock for read", node_item->NodeName().c_str());
  if (mmRDLockUnLock(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "[Node:%s] Unlock for read failed", node_item->NodeName().c_str());
    GELOGE(INTERNAL_ERROR, "[RWLock][Unlock][Node:%s] Unlock for read failed", node_item->NodeName().c_str());
    return nullptr;
  }

  return CreateNodeState(node_item);
}

NodeStatePtr SubgraphContext::CreateNodeState(const NodeItem *node_item) {
  GELOGD("[%s] lock for write", node_item->NodeName().c_str());
  if (mmRWLockWRLock(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "[Node:%s] Lock for write failed", node_item->NodeName().c_str());
    GELOGE(INTERNAL_ERROR, "[RWLock][Lock][Node:%s] Lock for write failed", node_item->NodeName().c_str());
    return nullptr;
  }

  auto &node_state = node_states_[node_item];
  do {
    if (node_state == nullptr) {
      const auto &guard = node_item->MutexGuard("GetOrCreateNodeState");
      node_state.reset(new(std::nothrow)NodeState(*node_item, this));
      if (node_state == nullptr || node_state->Init(group_, GetOrCreateFrameState(*node_item)) != SUCCESS) {
        GELOGE(INTERNAL_ERROR, "[Create][NodeState] failed for[%s].", node_item->NodeName().c_str());
        REPORT_CALL_ERROR("E19999", "Create NodeState failed for %s.", node_item->NodeName().c_str());
        break;
      }
      (void)guard;
    }
  } while (0);

  GELOGD("[%s] unlock for write", node_item->NodeName().c_str());
  if (mmWRLockUnLock(&rw_lock_) != EN_OK) {
    REPORT_CALL_ERROR("E19999", "[Node:%s] Unlock for write failed", node_item->NodeName().c_str());
    GELOGE(INTERNAL_ERROR, "[RWLock][Unlock][Node:%s] Unlock for write failed", node_item->NodeName().c_str());
    return nullptr;
  }

  return node_state;
}

FrameStatePtr SubgraphContext::GetOrCreateFrameState(const NodeItem &node_item) {
  auto &frame_state = frame_states_[node_item.frame_index_];
  if (frame_state == nullptr) {
    GELOGD("[%s] Create FrameState, frame index: %ld, parent frame index: %ld",
           node_item.node_name.c_str(), node_item.frame_index_, node_item.parent_frame_);
    frame_state.reset(new(std::nothrow)FrameState(node_item.frame_index_));
    if (node_item.frame_index_ != -1) {  // -1 is root frame.
      frame_state->parent_frame_ = frame_states_[node_item.parent_frame_];
    }
  }
  return frame_state;
}
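
// Input and output tensors for the whole subgraph live in the flat
// all_inputs_ / all_outputs_ vectors; each node owns a contiguous slice
// addressed by its input_start / output_start offsets, so
// SetInput(node_item, i) resolves to all_inputs_[node_item.input_start + i].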
Status SubgraphContext::SetInput(int index, const TensorValue &tensor) {
  if (static_cast<size_t>(index) >= all_inputs_.size()) {
    GELOGE(INTERNAL_ERROR,
           "[Check][Param:index]input index out of range. all input num = %zu, input index = %d",
           all_inputs_.size(), index);
    REPORT_INNER_ERROR("E19999", "input param index out of range, all input num = %zu, input index = %d.",
                       all_inputs_.size(), index);
    return INTERNAL_ERROR;
  }
  all_inputs_[index] = tensor;
  return SUCCESS;
}

Status SubgraphContext::SetInput(const NodeItem &node_item, int input_index, const TensorValue &tensor) {
  auto index = node_item.input_start + input_index;
  return SetInput(index, tensor);
}

Status SubgraphContext::SetOutput(const NodeItem &node_item, int output_index, const TensorValue &tensor) {
  auto index = node_item.output_start + output_index;
  if ((output_index >= node_item.num_outputs) || (static_cast<size_t>(index) >= all_outputs_.size())) {
    GELOGE(INTERNAL_ERROR, "[Check][Param:output_index]output index out of range. all output num = %zu,"
           "node_item = %s, output index = %d.",
           all_outputs_.size(), node_item.DebugString().c_str(), output_index);
    REPORT_INNER_ERROR("E19999", "output index out of range. all output num = %zu, node_item = %s, output index = %d.",
                       all_outputs_.size(), node_item.DebugString().c_str(), output_index);
    return INTERNAL_ERROR;
  }

  all_outputs_[index] = tensor;
  return SUCCESS;
}

Status SubgraphContext::GetInput(int index, TensorValue &tensor) {
  GE_CHECK_GE(all_inputs_.size(), index + 1U);
  tensor = all_inputs_[index];
  return SUCCESS;
}

Status SubgraphContext::GetOutputs(std::vector<TensorValue> &outputs) {
  if (graph_item_->IsDynamic()) {
    GELOGD("[%s] graph is dynamic, get outputs from net output input tensors", graph_item_->GetName().c_str());
    // get from net output inputs
    auto output_node = graph_item_->GetOutputNode();
    if (output_node != nullptr) {
      for (int i = 0; i < output_node->num_inputs; ++i) {
        TensorValue tensor;
        GE_CHK_STATUS_RET_NOLOG(GetInput(output_node->input_start + i, tensor));
        GELOGD("[%s] Adding output tensor by input index [%d], tensor = %s",
               graph_item_->GetName().c_str(),
               output_node->input_start + i,
               tensor.DebugString().c_str());
        outputs.emplace_back(std::move(tensor));
      }
    }
  } else {
    GELOGD("[%s] graph is non-dynamic, get outputs from subgraph outputs", graph_item_->GetName().c_str());
    for (auto &tensor : all_outputs_) {
      GELOGD("[%s] Adding output tensor: %s", graph_item_->GetName().c_str(), tensor.DebugString().c_str());
      outputs.emplace_back(tensor);
    }
  }
  return SUCCESS;
}

Status SubgraphContext::Await(const NodePtr &node) {
  if (node_done_manager_.Await(node)) {
    return SUCCESS;
  }
  if (execution_context_->is_eos_) {
    return END_OF_SEQUENCE;
  }
  return FAILED;
}

void SubgraphContext::OnError(Status error) {
  if (error != END_OF_SEQUENCE) {
    GELOGE(error, "[Check][Param:error][%s] Error:%d occurred while executing graph.",
           graph_item_->GetName().c_str(), error);
    REPORT_INNER_ERROR("E19999", "[%s] Error:%d occurred while executing graph.",
                       graph_item_->GetName().c_str(), error);
  }
  node_done_manager_.Destroy();
}

void SubgraphContext::NodeDone(const NodePtr &node) {
  node_done_manager_.NodeDone(node);
}

void SubgraphContext::Reset() {
  node_done_manager_.Reset();
  if (mmRWLockWRLock(&rw_lock_) == EN_OK) {
    node_states_.clear();
    (void)mmWRLockUnLock(&rw_lock_);
  }
}
}  // namespace hybrid
}  // namespace ge
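
As noted above, the core concurrency idea in this file is a double-checked lookup over a reader-writer lock. The following is a minimal standalone sketch of the same pattern, using std::shared_mutex (C++17) in place of GE's mm* lock wrappers; StateCache and the simplified NodeState are illustrative names for this sketch, not GE types:

#include <iostream>
#include <map>
#include <memory>
#include <mutex>
#include <shared_mutex>
#include <string>

struct NodeState {
  explicit NodeState(std::string n) : name(std::move(n)) {}
  std::string name;
};
using NodeStatePtr = std::shared_ptr<NodeState>;

class StateCache {
 public:
  NodeStatePtr GetOrCreate(const std::string &key) {
    {
      // Fast path: shared (read) lock, since cache hits are the common case.
      std::shared_lock<std::shared_mutex> read_guard(mutex_);
      auto iter = states_.find(key);
      if (iter != states_.end()) {
        return iter->second;
      }
    }
    // Slow path: exclusive (write) lock. The entry is re-checked after
    // acquiring the lock, because another thread may have created it between
    // the read-unlock above and the write-lock here -- the same reason
    // CreateNodeState tests node_states_[node_item] for nullptr instead of
    // inserting unconditionally.
    std::unique_lock<std::shared_mutex> write_guard(mutex_);
    auto &state = states_[key];
    if (state == nullptr) {
      state = std::make_shared<NodeState>(key);
    }
    return state;
  }

 private:
  std::shared_mutex mutex_;
  std::map<std::string, NodeStatePtr> states_;
};

int main() {
  StateCache cache;
  auto a = cache.GetOrCreate("node_a");
  auto b = cache.GetOrCreate("node_a");
  std::cout << (a == b) << std::endl;  // prints 1: the same state is reused
  return 0;
}

Taking the read lock first keeps concurrent executors from serializing on the common cache-hit path; the write lock and the second nullptr check are only paid when a state is created for the first time.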

The Graph Engine (GE) module is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware and serves as the bridge between them. GE takes the graph delivered by ME as input, applies a series of deep graph optimizations, and finally outputs a graph that runs efficiently on the underlying hardware. GE performs optimizations tailored to the hardware architecture of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.