You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

graph_caching_allocator.cc 9.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
  1. /**
  2. * Copyright 2019-2020 Huawei Technologies Co., Ltd
  3. *
  4. * Licensed under the Apache License, Version 2.0 (the "License");
  5. * you may not use this file except in compliance with the License.
  6. * You may obtain a copy of the License at
  7. *
  8. * http://www.apache.org/licenses/LICENSE-2.0
  9. *
  10. * Unless required by applicable law or agreed to in writing, software
  11. * distributed under the License is distributed on an "AS IS" BASIS,
  12. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. * See the License for the specific language governing permissions and
  14. * limitations under the License.
  15. */
  16. #include "graph/manager/graph_caching_allocator.h"
  17. #include <set>
  18. #include <string>
  19. #include <utility>
  20. #include "framework/common/debug/ge_log.h"
  21. #include "graph/manager/graph_mem_allocator.h"
  22. namespace ge {
  23. const size_t bin_ranges[kNumBins] = {kRoundBlockSize * kKByteSize,
  24. 8 * kMByteSize,
  25. 32 * kMByteSize,
  26. 128 * kMByteSize,
  27. kGByteSize,
  28. 4 * kGByteSize,
  29. 16 * kGByteSize,
  30. 26 * kGByteSize};
  31. static bool BlockComparator(const Block *left, const Block *right) {
  32. if (left->size != right->size) {
  33. return left->size < right->size;
  34. }
  35. return reinterpret_cast<uintptr_t>(left->ptr) < reinterpret_cast<uintptr_t>(right->ptr);
  36. }
  37. bool CanMerge(Block *block) {
  38. if (block == nullptr || block->allocated || !block->IsSplit()) {
  39. return false;
  40. }
  41. return true;
  42. }
  43. size_t GetBinIndex(size_t size) {
  44. size_t index = 0;
  45. for (auto range : bin_ranges) {
  46. if (size <= range) {
  47. break;
  48. }
  49. ++index;
  50. }
  51. if (index > kNumBins - 1) {
  52. index = kNumBins - 1;
  53. }
  54. return index;
  55. }
  56. size_t GetAllocationSize(size_t size) {
  57. size_t index = GetBinIndex(size);
  58. return bin_ranges[index];
  59. }
  60. ///
  61. /// @ingroup ge_graph
  62. /// @brief block size based on alignment
  63. /// @param [in] original malloc size
  64. /// @return allocation size
  65. ///
  66. size_t GetBlockSize(size_t size) {
  67. if (size == 0) {
  68. return kRoundBlockSize;
  69. }
  70. return kRoundBlockSize * ((size + kRoundBlockSize - 1) / kRoundBlockSize);
  71. }
  72. bool ShouldSplit(const Block *block, size_t size) {
  73. return static_cast<double>(size) <= (static_cast<double>(block->size) * kSplitThreshold);
  74. }
  75. CachingAllocator::CachingAllocator(rtMemType_t memory_type) : memory_type_(memory_type), memory_allocator_(nullptr) {
  76. for (uint32_t i = 0; i < kNumBins; ++i) {
  77. free_block_bins_[i] = nullptr;
  78. }
  79. }
  80. Status CachingAllocator::Initialize(uint32_t device_id) {
  81. GELOGI("Device id %u", device_id);
  82. // when redo Initialize free old memory
  83. FreeBlocks();
  84. std::lock_guard<std::recursive_mutex> lock(mutex_);
  85. for (uint32_t i = 0; i < kNumBins; ++i) {
  86. if (free_block_bins_[i] != nullptr) {
  87. continue;
  88. }
  89. auto bin_ptr = new (std::nothrow) BlockBin(BlockComparator);
  90. if (bin_ptr == nullptr) {
  91. GELOGE(ge::FAILED, "Alloc BlockBin failed.");
  92. return ge::FAILED;
  93. }
  94. free_block_bins_[i] = bin_ptr;
  95. }
  96. memory_allocator_ = MemManager::Instance(memory_type_);
  97. if (memory_allocator_ == nullptr) {
  98. return ge::FAILED;
  99. }
  100. return ge::SUCCESS;
  101. }
  102. void CachingAllocator::Finalize(uint32_t device_id) {
  103. GELOGI("Device id %u", device_id);
  104. FreeBlocks();
  105. FreeBlockBins();
  106. }
  107. uint8_t *CachingAllocator::Malloc(size_t size, uint8_t *org_ptr, uint32_t device_id) {
  108. uint8_t *ptr = nullptr;
  109. size = GetBlockSize(size);
  110. Block *block = FindFreeBlock(size, org_ptr, device_id);
  111. if (block != nullptr) {
  112. ptr = block->ptr;
  113. } else {
  114. if (ge::SUCCESS == TryExtendCache(size, device_id)) {
  115. block = FindFreeBlock(size, org_ptr, device_id);
  116. if (block != nullptr) {
  117. ptr = block->ptr;
  118. }
  119. }
  120. }
  121. if (ptr == nullptr) {
  122. GELOGE(FAILED, "Malloc failed device id = %u, size= %zu", device_id, size);
  123. }
  124. return ptr;
  125. }
  126. Status CachingAllocator::Free(uint8_t *ptr, uint32_t device_id) {
  127. GELOGI("Free device id = %u", device_id);
  128. if (ptr == nullptr) {
  129. GELOGE(PARAM_INVALID, "Invalid memory pointer");
  130. return ge::PARAM_INVALID;
  131. }
  132. std::lock_guard<std::recursive_mutex> lock(mutex_);
  133. auto it = allocated_blocks_.find(ptr);
  134. if (it == allocated_blocks_.end()) {
  135. GELOGE(PARAM_INVALID, "Invalid memory pointer");
  136. return ge::PARAM_INVALID;
  137. }
  138. Block *block = it->second;
  139. allocated_blocks_.erase(it);
  140. FreeBlock(block);
  141. return ge::SUCCESS;
  142. }
// Mark a block as free, coalesce it with any free split neighbours, and
// re-insert the (possibly enlarged) block into its bin.
void CachingAllocator::FreeBlock(Block *block) {
  if (block == nullptr || !block->allocated) {
    return;
  }
  GELOGI("Free block size = %zu", block->size);
  std::lock_guard<std::recursive_mutex> lock(mutex_);
  block->allocated = false;
  auto &bin = *block->bin;
  // Try both physical neighbours; MergeBlocks is a no-op when a neighbour is
  // missing, still allocated, or not part of a split chain (see CanMerge).
  Block *merge_blocks[] = {block->prev, block->next};
  for (Block *merge_block : merge_blocks) {
    MergeBlocks(block, merge_block, bin);
  }
  bin.insert(block);
}
// Absorb |src| (a free split neighbour) into |dst|: extend dst over src's
// byte range, relink the physical prev/next chain, and destroy src.
// No-op unless both blocks satisfy CanMerge().
void CachingAllocator::MergeBlocks(Block *dst, Block *src, BlockBin &bin) {
  if (!CanMerge(dst) || !CanMerge(src)) {
    return;
  }
  if (dst->prev == src) {
    // src sits immediately before dst: dst takes over src's start address.
    dst->ptr = src->ptr;
    dst->prev = src->prev;
    if (dst->prev != nullptr) {
      dst->prev->next = dst;
    }
  } else {
    // src sits immediately after dst: dst keeps its address and grows forward.
    dst->next = src->next;
    if (dst->next != nullptr) {
      dst->next->prev = dst;
    }
  }
  dst->size += src->size;
  bin.erase(src);
  delete src;
}
  177. BlockBin *CachingAllocator::GetBlockBin(size_t size) {
  178. size_t index = GetBinIndex(size);
  179. return free_block_bins_[index];
  180. }
// Search the size-matching bin for the smallest cached block able to satisfy
// |size|, preferring (via the search key) a block just before org_ptr when
// given. Oversized hits are split. Returns nullptr when the bin is missing or
// holds no suitable block.
Block *CachingAllocator::FindFreeBlock(size_t size, uint8_t *org_ptr, uint32_t device_id) {
  // org_ptr - 1, try to find ptr same as org_ptr
  Block key(device_id, size, (org_ptr == nullptr ? nullptr : org_ptr - 1));
  BlockBin *bin = GetBlockBin(size);
  if (bin == nullptr) {
    GELOGE(ge::FAILED, "Get block bin failed size = %zu", size);
    return nullptr;
  }
  std::lock_guard<std::recursive_mutex> lock(mutex_);
  // lower_bound: first block ordered >= key by (size, ptr) -- see BlockComparator.
  auto it = bin->lower_bound(&key);
  if (it != bin->end()) {
    Block *block = *it;
    // Remove from the bin before any split so the bin never holds a block
    // whose size is being mutated.
    bin->erase(it);
    if (block != nullptr) {
      GELOGI("Find block size = %zu", block->size);
      if (ShouldSplit(block, size)) {
        // Block is much larger than the request: carve off exactly |size|.
        block = SplitBlock(block, size, *bin, device_id);
      }
      if (block->ptr != nullptr) {
        block->allocated = true;
        allocated_blocks_[block->ptr] = block;
        GELOGI("Malloc device id = %u, size= %zu", device_id, size);
      }
    }
    return block;
  }
  return nullptr;
}
// Split |block| into a leading |size|-byte block (returned) and a trailing
// remainder, linked as physical neighbours; the remainder goes back into |bin|.
// NOTE(review): the remainder is re-inserted into the same bin even though its
// reduced size may map to a smaller bin index -- confirm this is intended.
Block *CachingAllocator::SplitBlock(Block *block, size_t size, BlockBin &bin, uint32_t device_id) {
  // block has been checked, should not be nullptr
  Block *remaining = block;
  Block *new_block = new (std::nothrow) Block(device_id, size, &bin, block->ptr);
  if (new_block == nullptr) {
    // Bookkeeping-node allocation failed: hand back the whole unsplit block.
    GELOGE(ge::FAILED, "Alloc block failed size = %zu", size);
    return block;
  }
  // Link new_block immediately before remaining in the physical chain.
  new_block->prev = remaining->prev;
  if (new_block->prev != nullptr) {
    new_block->prev->next = new_block;
  }
  new_block->next = remaining;
  remaining->prev = new_block;
  // Remainder covers the tail of the original range.
  remaining->ptr = remaining->ptr + size;
  remaining->size -= size;
  bin.insert(remaining);
  return new_block;
}
  228. Status CachingAllocator::TryExtendCache(size_t size, uint32_t device_id) {
  229. auto memory_size = GetAllocationSize(size);
  230. const std::string purpose = "Memory for caching.";
  231. auto memory_addr = memory_allocator_->MallocMemory(purpose, memory_size, device_id);
  232. // try to free caches and malloc again when malloc memory failed
  233. if (memory_addr == nullptr) {
  234. FreeCachedBlocks();
  235. memory_addr = memory_allocator_->MallocMemory(purpose, memory_size, device_id);
  236. if (memory_addr == nullptr) {
  237. GELOGE(ge::FAILED, "TryExtendCache failed, no enough memory for size = %zu, device_id = %u", memory_size,
  238. device_id);
  239. return ge::FAILED;
  240. }
  241. }
  242. if (AddToBlockBin(memory_addr, memory_size, device_id) != ge::SUCCESS) {
  243. (void)memory_allocator_->FreeMemory(memory_addr);
  244. return ge::FAILED;
  245. }
  246. return ge::SUCCESS;
  247. }
// Wrap freshly allocated device memory in a Block and register it in the bin
// matching its size class. On success the cache owns |ptr|.
Status CachingAllocator::AddToBlockBin(uint8_t *ptr, size_t size, uint32_t device_id) {
  BlockBin *bin = GetBlockBin(size);
  if (bin == nullptr) {
    GELOGE(ge::FAILED, "Get block bin failed size = %zu", size);
    return ge::FAILED;
  }
  Block *block = new (std::nothrow) Block(device_id, size, bin, nullptr);
  if (block == nullptr) {
    GELOGE(ge::FAILED, "Alloc block failed size = %zu", size);
    return ge::FAILED;
  }
  GELOGI("Block size = %zu", size);
  block->ptr = ptr;
  // NOTE(review): |size| was already passed to the Block constructor above;
  // this re-assignment looks redundant -- confirm against Block's ctor.
  block->size = size;
  std::lock_guard<std::recursive_mutex> lock(mutex_);
  bin->insert(block);
  return ge::SUCCESS;
}
// Release every cached block that is still a whole device allocation (never
// split: no physical neighbours) back to the backing allocator. Split
// fragments are kept, since their base allocation cannot be freed piecemeal.
void CachingAllocator::FreeCachedBlocks() {
  GELOGI("Free cached blocks");
  std::lock_guard<std::recursive_mutex> lock(mutex_);
  for (uint32_t i = 0; i < kNumBins; ++i) {
    auto pool = free_block_bins_[i];
    if (pool == nullptr) {
      continue;
    }
    for (auto it = pool->begin(); it != pool->end();) {
      Block *block = *it;
      // free block memory that has not been split
      if ((block != nullptr) && (block->ptr != nullptr) && (block->prev == nullptr) && (block->next == nullptr) &&
          (memory_allocator_->FreeMemory(block->ptr) == ge::SUCCESS)) {
        // erase(it++): advance the iterator before erasing so it stays valid.
        pool->erase(it++);
        delete block;
        continue;
      }
      // Kept in the cache (split fragment, or FreeMemory refused).
      ++it;
    }
  }
}
  287. void CachingAllocator::FreeBlocks() {
  288. GELOGI("Free blocks");
  289. std::lock_guard<std::recursive_mutex> lock(mutex_);
  290. // free allocated blocks and put to cache
  291. for (auto &it : allocated_blocks_) {
  292. FreeBlock(it.second);
  293. }
  294. allocated_blocks_.clear();
  295. FreeCachedBlocks();
  296. }
  297. void CachingAllocator::FreeBlockBins() {
  298. GELOGI("Free block bins");
  299. std::lock_guard<std::recursive_mutex> lock(mutex_);
  300. for (uint32_t i = 0; i < kNumBins; ++i) {
  301. if (free_block_bins_[i] != nullptr) {
  302. delete free_block_bins_[i];
  303. free_block_bins_[i] = nullptr;
  304. }
  305. }
  306. }
  307. } // namespace ge

图引擎模块(GE)是MindSpore的一个子模块,其代码由C++实现,位于前端模块ME和底层硬件之间,起到承接作用。图引擎模块以ME下发的图作为输入,然后进行一系列的深度图优化操作,最后输出一张可以在底层硬件上高效运行的图。GE针对昇腾AI处理器的硬件结构特点,做了特定的优化工作,以此来充分发挥出昇腾AI处理器的强大算力。在进行模型训练/推理时,GE会被自动调用而用户并不感知。GE主要由GE API和GE Core两部分组成,详细的架构图如下所示