
stream_resource.cc 8.0 kB

/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "single_op/stream_resource.h"

#include "framework/common/debug/ge_log.h"
#include "framework/common/debug/log.h"
#include "runtime/rt.h"
#include "single_op/single_op_model.h"

namespace ge {
namespace {
// Limit the available device memory size to 1 MB.
const uint32_t kFuzzDeviceBufferSize = 1 * 1024 * 1024;
constexpr int kDefaultThreadNum = 4;
}  // namespace

StreamResource::StreamResource(uintptr_t resource_id) : resource_id_(resource_id) {
}

StreamResource::~StreamResource() {
  // Release all device memory owned by this resource.
  for (auto mem : memory_list_) {
    if (mem != nullptr) {
      auto rt_ret = rtFree(mem);
      GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
    }
  }
  for (auto weight : weight_list_) {
    if (weight != nullptr) {
      auto rt_ret = rtFree(weight);
      GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
    }
  }
  if (device_buffer_ != nullptr) {
    auto rt_ret = rtFree(device_buffer_);
    GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Free][Rt] failed."));
  }
}

Status StreamResource::Init() {
  auto rt_ret = rtMalloc(&device_buffer_, kFuzzDeviceBufferSize, RT_MEMORY_HBM);
  GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[Malloc][Rt] failed."));
  return SUCCESS;
}

SingleOp *StreamResource::GetOperator(const uint64_t key) {
  std::lock_guard<std::mutex> lk(mu_);
  auto it = op_map_.find(key);
  if (it == op_map_.end()) {
    return nullptr;
  }
  return it->second.get();
}

DynamicSingleOp *StreamResource::GetDynamicOperator(const uint64_t key) {
  std::lock_guard<std::mutex> lk(mu_);
  auto it = dynamic_op_map_.find(key);
  if (it == dynamic_op_map_.end()) {
    return nullptr;
  }
  return it->second.get();
}

rtStream_t StreamResource::GetStream() const {
  return stream_;
}

void StreamResource::SetStream(rtStream_t stream) {
  stream_ = stream;
}

uint8_t *StreamResource::DoMallocMemory(const std::string &purpose,
                                        size_t size,
                                        size_t &max_allocated,
                                        std::vector<uint8_t *> &allocated) {
  if (size == 0) {
    GELOGD("Mem size == 0");
    return nullptr;
  }

  // Reuse the last allocation when it is already large enough.
  if (size <= max_allocated && !allocated.empty()) {
    GELOGD("reuse last memory");
    return allocated.back();
  }

  // Otherwise drain the stream, free the previous buffer and allocate a larger one.
  if (!allocated.empty()) {
    uint8_t *current_buffer = allocated.back();
    allocated.pop_back();
    if (rtStreamSynchronize(stream_) != RT_ERROR_NONE) {
      GELOGW("Failed to invoke rtStreamSynchronize");
    }
    (void)rtFree(current_buffer);
  }

  uint8_t *buffer = nullptr;
  auto ret = rtMalloc(reinterpret_cast<void **>(&buffer), size, RT_MEMORY_HBM);
  if (ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[RtMalloc][Memory] failed, size = %zu, ret = %d", size, ret);
    REPORT_INNER_ERROR("E19999", "rtMalloc failed, size = %zu, ret = %d.", size, ret);
    return nullptr;
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, purpose.c_str(), size)

  ret = rtMemset(buffer, size, 0U, size);
  if (ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[RtMemset][Memory] failed, ret = %d", ret);
    REPORT_INNER_ERROR("E19999", "rtMemset failed, ret = %d.", ret);
    auto rt_ret = rtFree(buffer);
    GE_IF_BOOL_EXEC(rt_ret != RT_ERROR_NONE, GELOGE(RT_FAILED, "[RtFree][Memory] failed"));
    return nullptr;
  }

  GELOGD("Malloc new memory succeeded. size = %zu", size);
  max_allocated = size;
  allocated.emplace_back(buffer);
  return buffer;
}

uint8_t *StreamResource::MallocMemory(const std::string &purpose, size_t size, bool holding_lock) {
  GELOGD("To Malloc memory, size = %zu", size);
  if (holding_lock) {
    return DoMallocMemory(purpose, size, max_memory_size_, memory_list_);
  } else {
    std::lock_guard<std::mutex> lk(stream_mu_);
    return DoMallocMemory(purpose, size, max_memory_size_, memory_list_);
  }
}

uint8_t *StreamResource::MallocWeight(const std::string &purpose, size_t size) {
  GELOGD("To Malloc weight, size = %zu", size);
  uint8_t *buffer = nullptr;
  auto ret = rtMalloc(reinterpret_cast<void **>(&buffer), size, RT_MEMORY_HBM);
  if (ret != RT_ERROR_NONE) {
    GELOGE(RT_FAILED, "[RtMalloc][Memory] failed, size = %zu, ret = %d", size, ret);
    REPORT_INNER_ERROR("E19999", "rtMalloc failed, size = %zu, ret = %d.", size, ret);
    return nullptr;
  }
  GE_PRINT_DYNAMIC_MEMORY(rtMalloc, purpose.c_str(), size)
  weight_list_.emplace_back(buffer);
  return buffer;
}

Status StreamResource::BuildDynamicOperator(const ModelData &model_data,
                                            DynamicSingleOp **single_op,
                                            const uint64_t model_id) {
  const string &model_name = std::to_string(model_id);
  std::lock_guard<std::mutex> lk(mu_);
  // Return the cached operator if this model has already been built.
  auto it = dynamic_op_map_.find(model_id);
  if (it != dynamic_op_map_.end()) {
    *single_op = it->second.get();
    return SUCCESS;
  }

  SingleOpModel model(model_name, model_data.model_data, model_data.model_len);
  auto ret = model.Init();
  if (ret != SUCCESS) {
    GELOGE(ret, "[Init][SingleOpModel] failed. model = %s, ret = %u", model_name.c_str(), ret);
    REPORT_CALL_ERROR("E19999", "SingleOpModel init failed, model = %s, ret = %u", model_name.c_str(), ret);
    return ret;
  }

  auto new_op = std::unique_ptr<DynamicSingleOp>(new(std::nothrow) DynamicSingleOp(resource_id_, &stream_mu_, stream_));
  GE_CHECK_NOTNULL(new_op);

  GELOGI("To build operator: %s", model_name.c_str());
  GE_CHK_STATUS_RET(model.BuildDynamicOp(*this, *new_op),
                    "[Build][DynamicOp]failed. op = %s, ret = %u", model_name.c_str(), ret);
  *single_op = new_op.get();
  dynamic_op_map_[model_id] = std::move(new_op);
  return SUCCESS;
}

Status StreamResource::BuildOperator(const ModelData &model_data, SingleOp **single_op, const uint64_t model_id) {
  const string &model_name = std::to_string(model_id);
  std::lock_guard<std::mutex> lk(mu_);
  // Return the cached operator if this model has already been built.
  auto it = op_map_.find(model_id);
  if (it != op_map_.end()) {
    *single_op = it->second.get();
    return SUCCESS;
  }

  SingleOpModel model(model_name, model_data.model_data, model_data.model_len);
  auto ret = model.Init();
  if (ret != SUCCESS) {
    GELOGE(ret, "[Init][SingleOpModel] failed. model = %s, ret = %u", model_name.c_str(), ret);
    REPORT_CALL_ERROR("E19999", "SingleOpModel init failed, model = %s, ret = %u", model_name.c_str(), ret);
    return ret;
  }

  auto new_op = std::unique_ptr<SingleOp>(new(std::nothrow) SingleOp(this, &stream_mu_, stream_));
  if (new_op == nullptr) {
    GELOGE(ACL_ERROR_GE_MEMORY_ALLOCATION, "[New][SingleOp] failed.");
    REPORT_CALL_ERROR("E19999", "new SingleOp failed.");
    return ACL_ERROR_GE_MEMORY_ALLOCATION;
  }

  GELOGI("To build operator: %s", model_name.c_str());
  GE_CHK_STATUS_RET(model.BuildOp(*this, *new_op), "[Build][Op] failed. op = %s, ret = %u", model_name.c_str(), ret);
  *single_op = new_op.get();
  op_map_[model_id] = std::move(new_op);
  return SUCCESS;
}

Status StreamResource::GetThreadPool(ThreadPool **thread_pool) {
  GE_CHECK_NOTNULL(thread_pool);
  if (thread_pool_ == nullptr) {
    // Lazily create the thread pool on first request.
    thread_pool_.reset(new (std::nothrow) ThreadPool(kDefaultThreadNum));
    GE_CHECK_NOTNULL(thread_pool_);
  }
  *thread_pool = thread_pool_.get();
  return SUCCESS;
}

const uint8_t *StreamResource::GetMemoryBase() const {
  if (memory_list_.empty()) {
    return nullptr;
  }
  return memory_list_.back();
}
}  // namespace ge
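
For orientation, the following is a minimal, hypothetical sketch of how the class above might be driven. Only the StreamResource methods shown in the file are taken from the source; the helper name RunOnStreamResource and the stream creation/destruction calls are illustrative assumptions about the surrounding runtime setup.

#include "single_op/stream_resource.h"
#include "runtime/rt.h"

ge::Status RunOnStreamResource() {
  rtStream_t stream = nullptr;
  if (rtStreamCreate(&stream, 0) != RT_ERROR_NONE) {  // priority 0; assumed runtime call
    return ge::FAILED;
  }

  ge::StreamResource resource(0U);      // resource_id is arbitrary in this sketch
  if (resource.Init() != ge::SUCCESS) { // allocates the 1 MB fuzz device buffer
    (void)rtStreamDestroy(stream);
    return ge::FAILED;
  }
  resource.SetStream(stream);

  // DoMallocMemory reuses the last buffer when the requested size still fits,
  // so repeated calls with non-increasing sizes return the same device pointer.
  uint8_t *buf = resource.MallocMemory("example buffer", 1024, false);
  ge::Status ret = (buf != nullptr) ? ge::SUCCESS : ge::FAILED;

  (void)rtStreamDestroy(stream);  // device buffers are freed by ~StreamResource
  return ret;
}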

The Graph Engine module (GE) is a submodule of MindSpore, implemented in C++. It sits between the front-end module ME and the underlying hardware, acting as the bridge between the two. GE takes the graph handed down by ME as input, performs a series of deep graph optimizations, and finally outputs a graph that can run efficiently on the underlying hardware. GE applies optimizations tailored to the hardware characteristics of the Ascend AI processor in order to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram is shown below.
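
To make the GE API side of that description concrete, here is a minimal sketch of how a front-end graph can be submitted to GE through the public session interface (ge/ge_api.h). The function name RunGraphOnGe, the empty option maps, and the graph id are placeholders; in practice ME constructs the ge::Graph and fills in the options.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"    // ge::GEInitialize, ge::Session, ge::GEFinalize
#include "graph/graph.h"  // ge::Graph

ge::Status RunGraphOnGe(const ge::Graph &graph,
                        const std::vector<ge::Tensor> &inputs,
                        std::vector<ge::Tensor> &outputs) {
  std::map<std::string, std::string> options;  // global/session options, left empty here
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return ge::FAILED;
  }

  ge::Session session(options);
  const uint32_t graph_id = 1U;  // placeholder id chosen by the caller
  if (session.AddGraph(graph_id, graph) != ge::SUCCESS) {
    (void)ge::GEFinalize();
    return ge::FAILED;
  }

  // GE Core optimizes and compiles the graph, then executes it on the Ascend device.
  ge::Status ret = session.RunGraph(graph_id, inputs, outputs);
  (void)ge::GEFinalize();
  return ret;
}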