
interpreter_impl.cpp

/**
 * \file imperative/src/impl/interpreter_impl.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "./interpreter_impl.h"
#include "megbrain/common.h"

using namespace mgb;
using namespace imperative;
using namespace interpreter;
using namespace interpreter::intl;

std::unique_ptr<Interpreter::Channel> InterpreterImpl::create_channel() {
    return std::make_unique<ChannelImpl>();
}

Interpreter& Interpreter::inst() {
    Tensor::_static_init();
    static InterpreterImpl inst_;
    return inst_;
}
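
// Put a host tensor into the channel. The returned opaque handle becomes
// valid immediately; the actual device upload happens asynchronously on the
// worker thread (see the Put branch of process_one_task).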
void* ChannelImpl::put(const HostTensorND& value, bool no_cache) {
    auto info = alloc();
    info->desc.layout = value.layout();
    info->desc.comp_node = value.comp_node();
    info->desc.value = value.proxy_to_default_cpu();
    m_valid_handle.insert(info);
    m_worker.add_task(Put{info, value, no_cache});
    return info;
}

void* ChannelImpl::put(const DeviceTensorND& data) {
    auto info = alloc();
    info->desc.layout = data.layout();
    info->desc.comp_node = data.comp_node();
    info->ptr = Tensor::make(data);
    m_valid_handle.insert(info);
    return info;
}

void ChannelImpl::del(void* handle) {
    mgb_assert(m_valid_handle.erase(handle), "invalid handle: %p", handle);
    m_worker.add_task(Del{reinterpret_cast<TensorInfo*>(handle)});
}
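
// swap_in/swap_out/drop only enqueue eviction commands for the worker; they
// are no-ops unless the corresponding SWAP/DROP bit is set in m_enable_evict.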
void ChannelImpl::swap_in(void* handle) {
    if (m_enable_evict & SWAP) {
        mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
                   "invalid handle: %p", handle);
        m_worker.add_task(SwapIn{reinterpret_cast<TensorInfo*>(handle)});
    }
}

void ChannelImpl::swap_out(void* handle) {
    if (m_enable_evict & SWAP) {
        mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
                   "invalid handle: %p", handle);
        m_worker.add_task(SwapOut{reinterpret_cast<TensorInfo*>(handle)});
    }
}

void ChannelImpl::drop(void* handle) {
    if (m_enable_evict & DROP) {
        mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
                   "invalid handle: %p", handle);
        m_worker.add_task(Drop{reinterpret_cast<TensorInfo*>(handle)});
    }
}
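
// Apply an op: output descriptors are inferred on the calling thread and
// output handles are allocated eagerly, while the actual computation is
// enqueued for the worker. When DROP is enabled, the recomputation path
// (op plus input/output infos) is recorded so dropped outputs can be
// regenerated later. At async level 0 every op is synchronized and device
// errors are checked; at level 1 a sync is forced only when shape inference
// could not be validated.
//
// A minimal usage sketch (illustrative only; `channel`, `op` and
// `host_tensor` are hypothetical names, not defined in this file):
//   void* h = channel->put(host_tensor, /*no_cache=*/false);
//   SmallVector<void*> outs = channel->apply_op(op, {h});
//   HostTensorND v = channel->get_value(outs[0]);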
SmallVector<void*> ChannelImpl::apply_op(
        std::shared_ptr<OpDef> op,
        const SmallVector<void*>& inputs) {
    for (auto i : inputs) {
        mgb_assert(m_valid_handle.find(i) != m_valid_handle.end(),
                   "invalid handle: %p", i);
    }
    SmallVector<TensorInfo*> input_infos;
    input_infos.reserve(inputs.size());
    SmallVector<LogicalTensorDesc> input_descs;
    input_descs.reserve(inputs.size());
    std::unique_lock<decltype(m_mutex)> lock(m_mutex);
    for (auto i : inputs) {
        auto info = reinterpret_cast<TensorInfo*>(i);
        mgb_assert(!info->invalid, "Invalid tensor, unable to apply_op!");
        input_infos.push_back(info);
        input_descs.push_back(info->desc);
    }
    lock.unlock();
    auto [output_descs, validated] = OpDef::infer_output_attrs_fallible(*op, input_descs);
    ApplyOp cmd{std::move(op)};
    cmd.inputs = std::move(input_infos);
    cmd.outputs.reserve(output_descs.size());
    SmallVector<void*> outputs;
    // FIXME: remove this check when op check is correct
    bool validated_bkp = true;
    for (size_t i = 0; i < output_descs.size(); i++) {
        auto&& desc = output_descs[i];
        if (desc.layout.ndim == 0) {
            validated_bkp = false;
        }
        auto info = alloc();
        info->desc = desc;
        m_valid_handle.insert(info);
        cmd.outputs.push_back(info);
        outputs.push_back(info);
    }
    if (m_enable_evict & DROP) {
        for (auto out : cmd.outputs) {
            out->path.op = cmd.op;
            for (auto out_ : cmd.outputs) {
                out->path.outputs.push_back(m_st.at(out_));
            }
            for (auto inp : cmd.inputs) {
                out->path.inputs.push_back(m_st.at(inp));
                inp->path.dep_outputs.push_back(m_st.at(out));
            }
        }
    }
    m_worker.add_task(std::move(cmd));
    if (!(validated && validated_bkp) && m_async_level == 1) {
        sync();
    } else if (m_async_level == 0) {
        sync();
        // check device error
        for (auto&& oup : outputs) {
            auto info = reinterpret_cast<TensorInfo*>(oup);
            info->ptr->comp_node().sync();
        }
    }
    return outputs;
}
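
// Blocking read of a tensor's host value: enqueue a GetValue command and
// wait on the condition variable until the worker has fetched the value,
// rethrowing any exception the worker recorded in the meantime.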
HostTensorND ChannelImpl::get_value(void* handle) {
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    std::unique_lock<decltype(m_mutex)> lock(m_mutex);
    mgb_assert(!m_waitee);
    if (!info->value_fetched) {
        mgb_assert(!info->invalid, "Invalid tensor, unable to get_value!");
        m_waitee = info;
        m_worker.add_task(GetValue{info});
        m_cv.wait(lock, [&]() {
            check_worker_exc_unsafe();
            return info->value_fetched;
        });
        m_waitee = nullptr;
    }
    mgb_assert(info->ptr->value_fetched());
    return info->ptr->get_value();
}

TensorShape ChannelImpl::get_shape(void* handle) {
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    if (info->desc.layout.ndim != 0) {
        return info->desc.layout;
    }
    std::unique_lock<decltype(m_mutex)> lock(m_mutex);
    mgb_assert(!m_waitee);
    m_waitee = info;
    m_cv.wait(lock, [&]() {
        check_worker_exc_unsafe();
        return bool(info->ptr);
    });
    m_waitee = nullptr;
    TensorShape ret = info->ptr->layout();
    mgb_assert(ret.ndim != 0);
    return ret;
}

DType ChannelImpl::get_dtype(void* handle) {
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    auto ret = info->desc.layout.dtype;
    mgb_assert(ret.valid());
    return ret;
}

CompNode ChannelImpl::get_device(void* handle) {
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    auto ret = info->desc.comp_node;
    mgb_assert(ret.valid());
    return ret;
}

DeviceTensorND ChannelImpl::get_dev_tensor(void* handle) {
    mgb_assert(m_valid_handle.find(handle) != m_valid_handle.end(),
               "invalid handle: %p", handle);
    auto info = reinterpret_cast<TensorInfo*>(handle);
    std::unique_lock<decltype(m_mutex)> lock(m_mutex);
    mgb_assert(!m_waitee);
    m_waitee = info;
    m_cv.wait(lock, [&]() {
        check_worker_exc_unsafe();
        return bool(info->ptr);
    });
    m_waitee = nullptr;
    return info->ptr->dev_tensor();
}
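
// Drain the worker queue, then surface any exception the worker recorded.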
void ChannelImpl::sync() {
    m_worker.wait_all_task_finish();
    MGB_LOCK_GUARD(m_mutex);
    check_worker_exc_unsafe();
}

void ChannelImpl::close() {
    sync();
}

void ChannelImpl::config_async_level(int level) {
    mgb_assert(level <= 2 && level >= 0, "async_level should be 0, 1 or 2");
    m_async_level = level;
}

int ChannelImpl::get_async_level() {
    return m_async_level;
}

TensorInfo* ChannelImpl::alloc() {
    MGB_LOCK_GUARD(m_mutex);
    auto info = m_pool.alloc();
    m_st.insert(info);
    return info;
}

void ChannelImpl::free(TensorInfo* ptr) {
    MGB_LOCK_GUARD(m_mutex);
    if (ptr->path.dep_outputs.size() > 0) {
        remove_dep(ptr);
    }
    m_st.erase(ptr);
    mgb_assert(ptr->allow_delete, "delete before ref_cnt = 0");
    m_pool.free(ptr);
}

ChannelImpl::~ChannelImpl() {
    close();
}
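
// Called from the worker to commit a computed tensor into its TensorInfo.
// With notice=true the channel mutex is taken and any thread blocked in
// get_value/get_shape/get_dev_tensor waiting on this info is woken up.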
void ChannelImpl::produce_tensor(TensorInfo* dest, TensorPtr ptr, bool notice = true) {
    if (notice) {
        MGB_LOCK_GUARD(m_mutex);
        dest->value_fetched = ptr->value_fetched();
        // update tensor desc for static infer
        // if (dest->desc.layout.ndim) {
        //     mgb_assert(dest->desc.layout.eq_shape(ptr->layout()));
        // }
        dest->desc.layout = ptr->layout();
        dest->desc.comp_node = ptr->comp_node();
        dest->ptr = std::move(ptr);
        if (m_waitee == dest) {
            m_cv.notify_all();
        }
    } else {
        dest->value_fetched = ptr->value_fetched();
        // update tensor desc for static infer
        dest->desc.layout = ptr->layout();
        dest->desc.comp_node = ptr->comp_node();
        dest->ptr = std::move(ptr);
    }
}
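
// Worker-side eviction helpers: do_swap_out backs the device tensor up to
// host memory and releases the device copy; do_swap_in restores it from
// that backup.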
void ChannelImpl::do_swap_out(TensorInfo* dest) {
    if (dest->evict_type == DROP) {
        mgb_log_warn("the evict type of tensor %p was set to DROP, this SWAP operation will be ignored", dest);
        return;
    }
    if (!dest->ptr) {
        return;
    }
    dest->evict_type = SWAP;
    dest->value_fetched = false;
    // TODO: swap in parallel
    dest->h_value.copy_from(dest->ptr->dev_tensor()).sync();
    dest->ptr.reset();
}

void ChannelImpl::do_swap_in(TensorInfo* dest) {
    if (dest->ptr) {
        return;
    }
    if (dest->h_value.empty()) {
        mgb_log_error("backup of the tensor %p not found", dest);
        return;
    }
    produce_tensor(dest, Tensor::make(dest->h_value), false);
    dest->evict_type = NONE;
}

void ChannelImpl::remove_dep(TensorInfo* dest) {
    for (auto i : dest->path.dep_outputs) {
        auto out_ptr = i.lock();
        if (out_ptr) {
            regenerate(out_ptr.get(), true);
        }
    }
}

void ChannelImpl::do_drop(TensorInfo* dest) {
    if (dest->evict_type == SWAP) {
        mgb_log_warn("the evict type of tensor %p was set to SWAP, this DROP operation will be ignored", dest);
        return;
    }
    if (!dest->path.op) {
        mgb_log_warn("the input that produced tensor %p has been deleted, this drop operation will be ignored", dest);
        return;
    }
    if (dest->recompute_times >= m_max_recompute_time) {
        mgb_log_warn("the recomputation time for tensor %p exceeds the limit, this drop operation will be ignored", dest);
        return;
    }
    if (!dest->ptr) {
        return;
    }
    dest->evict_type = DROP;
    dest->value_fetched = false;
    dest->ptr.reset();
}

void ChannelImpl::set_swap_flag(bool flag) {
    if (flag) {
        m_enable_evict |= SWAP;
    } else {
        m_enable_evict &= ~SWAP;
    }
}

void ChannelImpl::set_drop_flag(bool flag) {
    if (flag) {
        m_enable_evict |= DROP;
    } else {
        m_enable_evict &= ~DROP;
    }
}
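
// Regenerate an evicted tensor: swapped-out tensors are simply swapped back
// in, while dropped tensors are recomputed by replaying the recorded path,
// recursively regenerating any inputs that were themselves evicted. With
// must_drop (used from remove_dep) the recomputation path is cleared
// afterwards, since the input being freed would no longer be available.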
void ChannelImpl::regenerate(TensorInfo* info, bool must_drop = false) {
    if (!info->ptr && info->evict_type != NONE) {
        if (info->evict_type == SWAP) {
            do_swap_in(info);
        } else {
            mgb_assert(info->evict_type == DROP);
            mgb_assert(info->path.op, "recomputation path not found");
            auto path = info->path;
            SmallVector<TensorPtr> inputs;
            inputs.reserve(path.inputs.size());
            for (auto i : path.inputs) {
                mgb_assert(i, "invalid history input");
                if (!i->ptr) {
                    regenerate(i.get(), must_drop);
                }
                inputs.push_back(i->ptr);
            }
            auto outputs = OpDef::apply_on_physical_tensor(*path.op, inputs);
            for (size_t i = 0; i < outputs.size(); i++) {
                auto out_ptr = path.outputs[i].lock();
                if (out_ptr) {
                    out_ptr->recompute_times++;
                    if (!out_ptr->ptr && out_ptr->evict_type == DROP) {
                        produce_tensor(out_ptr.get(), std::move(outputs[i]), false);
                    }
                }
            }
        }
    }
    if (must_drop) {
        if (info->path.op) {
            info->path.op.reset();
            info->path.inputs.clear();
            if (info->evict_type == DROP) {
                info->evict_type = NONE;
            }
        }
    }
}
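
// Worker thread entry point: dispatch on the command type. Any exception is
// stored in m_worker_exc and rethrown on the API thread by
// check_worker_exc_unsafe; tensors affected by a failed Put/ApplyOp are
// marked invalid.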
void ChannelImpl::process_one_task(Command& cmd) {
    // TODO: remove std::visit to support OS X 10.12
    std::visit([this](auto& cmd) {
        using T = std::remove_reference_t<decltype(cmd)>;
        try {
            if constexpr (std::is_same_v<T, Put>) {
                auto value = cmd.no_cache ? std::make_shared<Tensor>(cmd.value)
                                          : Tensor::make(cmd.value);
                produce_tensor(cmd.dest, std::move(value));
            } else if constexpr (std::is_same_v<T, ApplyOp>) {
                SmallVector<TensorPtr> tensor_inputs;
                tensor_inputs.reserve(cmd.inputs.size());
                for (auto i : cmd.inputs) {
                    if (m_enable_evict && i->evict_type != NONE) {
                        if (!i->ptr) {
                            regenerate(i);
                        }
                    }
                    mgb_assert(i->ptr, "Invalid input tensor ptr!");
                    tensor_inputs.push_back(i->ptr);
                }
                auto tensor_outputs = OpDef::apply_on_physical_tensor(*cmd.op, tensor_inputs);
                mgb_assert(tensor_outputs.size() == cmd.outputs.size());
                for (size_t i = 0; i < tensor_outputs.size(); ++i) {
                    produce_tensor(cmd.outputs[i], std::move(tensor_outputs[i]));
                }
            } else if constexpr (std::is_same_v<T, Del>) {
                free(cmd.dest);
            } else if constexpr (std::is_same_v<T, GetValue>) {
                if (m_enable_evict && cmd.dest->evict_type != NONE) {
                    if (!cmd.dest->ptr) {
                        regenerate(cmd.dest);
                    }
                }
                mgb_assert(cmd.dest->ptr, "Invalid tensor ptr!");
                cmd.dest->ptr->fetch_value();
                MGB_LOCK_GUARD(m_mutex);
                cmd.dest->value_fetched = true;
                if (m_waitee == cmd.dest) {
                    m_cv.notify_all();
                }
            } else if constexpr (std::is_same_v<T, SwapIn>) {
                do_swap_in(cmd.dest);
            } else if constexpr (std::is_same_v<T, SwapOut>) {
                do_swap_out(cmd.dest);
            } else if constexpr (std::is_same_v<T, Drop>) {
                do_drop(cmd.dest);
            } else {
                static_assert(!std::is_same_v<T, T>);
            }
        } catch (...) {
            MGB_LOCK_GUARD(m_mutex);
            if constexpr (std::is_same_v<T, ApplyOp>) {
                for (auto oup : cmd.outputs) {
                    oup->invalid = true;
                }
            } else if constexpr (std::is_same_v<T, Put>) {
                cmd.dest->invalid = true;
            }
            m_worker_exc = std::current_exception();
            m_cv.notify_all();
        }
    }, cmd);
}

void ChannelImpl::check_worker_exc_unsafe() {
    if (m_worker_exc) {
        std::exception_ptr exc;
        std::swap(exc, m_worker_exc);
        std::rethrow_exception(exc);
    }
}
