
physical_tensor.cpp

/**
 * \file imperative/src/impl/physical_tensor.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "megbrain/imperative/physical_tensor.h"
#include "megbrain/common.h"
#include "megbrain/comp_node.h"
#include "megbrain/imperative.h"
#include "megbrain/imperative/blob_manager.h"
#include "megbrain/imperative/profiler.h"
#include "megbrain/imperative/resource_manager.h"
#include "./event_pool.h"
#include "./profiler/events.h"

#include <condition_variable>
#include <cstdint>
#include <deque>
#include <map>
#include <memory>
#include <mutex>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <variant>
#include <vector>

#ifndef WIN32
#include <pthread.h>
#endif

#include "range/v3/all.hpp"

namespace views = ranges::views;

namespace mgb {
namespace imperative {

namespace {

struct CompNodeHash {
    auto operator()(CompNode cn) const { return mgb::hash(cn); }
};

template <typename T>
struct NoThrowMovable : T {
    using T::T;
    NoThrowMovable(NoThrowMovable&&) noexcept = default;
};

template <typename... Ts>
using Map = NoThrowMovable<std::map<Ts...>>;
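
// CompNodeSyncManager tracks a monotonically increasing sequence of "virtual
// events" per comp node. Real CompNode::Events are allocated lazily, a background
// thread polls them for completion, and device/host storage handed to
// async_release() is kept alive until the corresponding virtual event is known to
// have completed.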
class CompNodeSyncManager {
    struct CompNodeData {
        template <typename T>
        class ReleaseQueue {
            Map<uint64_t, T> map;

        public:
            template <typename A>
            void emplace(uint64_t t, A&& a) {
                map.emplace_hint(map.end(), t, std::forward<A>(a));
            }
            void release(uint64_t t) {
                auto it = map.upper_bound(t);
                map.erase(map.begin(), it);
            }
        };

        //! next virtual event
        uint64_t next = 1;
        //! last completed virtual event
        uint64_t completed = 0;
        //! virtual event to real event
        Map<uint64_t, EventPtr> events;
        //! ordering information at some virtual events:
        //! which virtual events on other comp nodes are _sequenced before_ this
        //! virtual event
        Map<uint64_t, std::vector<uint64_t>> ordering;
        //! release queue for dev storage, keyed by releaser. this comp node is the
        //! **receiver**
        std::vector<ReleaseQueue<BlobPtr>> release_queues;
        //! release queue for host storage. this comp node is the **releaser**
        ReleaseQueue<HostTensorStorage::RawStorage> host_release_queue;
    };

    std::mutex m_mtx;
    std::condition_variable m_cv;
    bool m_should_stop = false;
    std::thread m_polling_thread;
    std::unordered_map<CompNode, size_t, CompNodeHash> m_cn2id;
    std::vector<CompNodeData> m_cndata;

    auto do_record(CompNode cn, size_t cnid, std::unique_lock<std::mutex>& lock) {
        // CAUTION: don't keep references across the locking boundary
        lock.unlock();
        auto e = EventPool::without_timer().alloc(cn);
        e->record();
        lock.lock();
        auto& cndata = m_cndata[cnid];
        return cndata.events.emplace_hint(cndata.events.end(), cndata.next++, e);
    }

    std::pair<uint64_t, CompNode::Event*> get_event(
            CompNode cn, size_t cnid, uint64_t t, std::unique_lock<std::mutex>& lock) {
        auto& cndata = m_cndata[cnid];
        auto it = cndata.events.lower_bound(t);
        if (it == cndata.events.end()) {
            it = do_record(cn, cnid, lock);
        }
        return {it->first, it->second.get()};
    }

    size_t get_cnid_unsafe(CompNode cn) {
        auto [it, unseen] = m_cn2id.try_emplace(cn, m_cndata.size());
        if (unseen) {
            m_cndata.emplace_back();
        }
        return it->second;
    }

    void monitor_events() {
#if defined(__APPLE__)
        pthread_setname_np("CompNodeSync");
#elif defined(__unix__)
        pthread_setname_np(pthread_self(), "CompNodeSync");
#endif
        // poll events in rounds. sleep for a fixed duration between rounds.
        // the number of events to query is determined by the number of successful
        // queries in the last round, independently for each comp node:
        // a. all -> double
        // b. 0 -> 1
        // c. otherwise -> #successful
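        // e.g. 4 successes out of 4 attempts last round -> query up to 8 events
        // this round; 0 successes -> query only 1. the polling effort thus adapts
        // to how quickly each comp node retires its events.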
        struct Item {
            size_t cnid;
            decltype(CompNodeData::events)::iterator it;
        };
        struct Stat {
            size_t num_success = 0;
            size_t num_attempts = 0;
            // iterator to the last finished event
            decltype(CompNodeData::events)::iterator it;
        };
        std::vector<Stat> stats;
        std::vector<Item> todos;
        std::unique_lock lock(m_mtx);
        for (;;) {
            // copy events to a temporary storage so that we may unlock while polling
            stats.resize(m_cndata.size());
            for (size_t cnid = 0; cnid < m_cndata.size(); ++cnid) {
                // decide max number of events to query
                // rule c: #successful
                size_t n = stats[cnid].num_success;
                if (n == stats[cnid].num_attempts) {
                    // rule a: double
                    n *= 2;
                }
                if (n == 0) {
                    // rule b: 1
                    n = 1;
                }
                // now copy up to n events
                auto& events = m_cndata[cnid].events;
                size_t i = 0;
                for (auto it = events.begin(); i < n && it != events.end(); ++i, ++it) {
                    todos.push_back({cnid, it});
                }
                // reset stats for this round
                stats[cnid].num_success = 0;
                stats[cnid].num_attempts = n;
            }
            lock.unlock();
            bool last_result = false;
            size_t last_cnid = -1;
            for (auto item : todos) {
                if (item.cnid == last_cnid && !last_result) {
                    // the previous event on this comp node has not finished,
                    // so this one almost certainly has not either
                    continue;
                }
                last_cnid = item.cnid;
                last_result = item.it->second->finished();
                if (last_result) {
                    stats[item.cnid].num_success++;
                    stats[item.cnid].it = item.it;
                }
            }
            todos.clear();
            lock.lock();
            // release dev storage
            for (size_t receiver_cnid = 0; receiver_cnid < m_cndata.size();
                 ++receiver_cnid) {
                for (size_t releaser_cnid = 0;
                     releaser_cnid < m_cndata[receiver_cnid].release_queues.size();
                     ++releaser_cnid) {
                    if (releaser_cnid >= stats.size() ||
                        stats[releaser_cnid].num_success == 0) {
                        continue;
                    }
                    auto& q = m_cndata[receiver_cnid].release_queues[releaser_cnid];
                    q.release(stats[releaser_cnid].it->first);
                }
            }
            for (size_t cnid = 0; cnid < stats.size(); ++cnid) {
                if (stats[cnid].num_success == 0) {
                    continue;
                }
                auto& cndata = m_cndata[cnid];
                auto it = stats[cnid].it;
                auto t = it->first;
                // update completed
                cndata.completed = t;
                // release host storage
                cndata.host_release_queue.release(t);
                // remove completed events
                auto& events = cndata.events;
                events.erase(events.begin(), std::next(it));
            }
            using namespace std::literals;
            if (m_cv.wait_for(lock, 10us, [&] { return m_should_stop; })) {
                return;
            }
        }
    }

    CompNodeSyncManager() {
        m_polling_thread = std::thread([this] { monitor_events(); });
    }

public:
    ~CompNodeSyncManager() {
        {
            MGB_LOCK_GUARD(m_mtx);
            m_should_stop = true;
            m_cv.notify_all();
        }
        m_polling_thread.join();
    }

    static CompNodeSyncManager& inst();

    uint64_t record(CompNode cn, bool doitnow = false) {
        std::unique_lock lock(m_mtx);
        auto cnid = get_cnid_unsafe(cn);
        if (doitnow) {
            return do_record(cn, cnid, lock)->first;
        }
        return m_cndata[cnid].next++;
    }
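
    // both async_release overloads park the storage in a release queue keyed by the
    // releaser's virtual event t; the polling thread frees it once that event has
    // completed. if t has already completed, the reference is dropped immediately.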
    void async_release(CompNode cn, uint64_t t, BlobPtr blob) {
        MGB_LOCK_GUARD(m_mtx);
        auto releaser_cnid = get_cnid_unsafe(cn);
        if (t <= m_cndata[releaser_cnid].completed) {
            return;
        }
        auto receiver_cnid = get_cnid_unsafe(blob->comp_node());
        auto& qs = m_cndata[receiver_cnid].release_queues;
        if (releaser_cnid >= qs.size()) {
            qs.resize(releaser_cnid + 1);
        }
        auto& q = qs[releaser_cnid];
        q.emplace(t, std::move(blob));
    }

    void async_release(CompNode cn, uint64_t t, HostTensorStorage::RawStorage storage) {
        MGB_LOCK_GUARD(m_mtx);
        auto releaser_cnid = get_cnid_unsafe(cn);
        if (t <= m_cndata[releaser_cnid].completed) {
            return;
        }
        auto& q = m_cndata[releaser_cnid].host_release_queue;
        q.emplace(t, std::move(storage));
    }
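
    // device_wait makes `waiter` device-wait on a real event recorded at (or after)
    // waitee's virtual event t, then stores, at a new virtual event on the waiter,
    // the latest event of every other comp node known to be sequenced before it;
    // the release queues held by the waiter as a receiver are drained up to those
    // points.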
    void device_wait(CompNode waiter, CompNode waitee, uint64_t t) {
        std::unique_lock lock(m_mtx);
        auto waiter_id = get_cnid_unsafe(waiter);
        auto waitee_id = get_cnid_unsafe(waitee);
        auto& waiter_data = m_cndata.at(waiter_id);
        auto& waitee_data = m_cndata.at(waitee_id);
        auto [t_waitee, e] = get_event(waitee, waitee_id, t, lock);
        // DO NOT unlock around this line! Event* could be invalidated!
        e->device_wait_by(waiter);
        auto t_waiter = waiter_data.next++;
        std::vector<uint64_t> ordering(m_cndata.size(), 0);
        if (!waiter_data.ordering.empty()) {
            auto& o = waiter_data.ordering.rbegin()->second;
            std::copy(o.begin(), o.end(), ordering.begin());
        }
        ordering[waitee_id] = t_waitee;
        ordering[waiter_id] = t_waiter;
        {
            auto it = waitee_data.ordering.lower_bound(t_waitee);
            if (it != waitee_data.ordering.begin()) {
                for (auto [a, b] : views::zip(ordering, std::prev(it)->second)) {
                    static_assert(std::is_lvalue_reference_v<decltype(a)>);
                    a = std::max(a, b);
                }
            }
        }
        waiter_data.ordering.emplace_hint(
                waiter_data.ordering.end(), t_waiter, ordering);
        for (auto [t, q] : views::zip(ordering, waiter_data.release_queues)) {
            q.release(t);
        }
    }
};
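
// inst() keeps the singleton behind a CompNodeDepedentObject guard so it is torn
// down on comp node finalization. after a fork() the polling thread no longer
// exists in the child, so the instance is deliberately leaked there and any later
// attempt to recreate it in the child trips the `broken` assertion.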
CompNodeSyncManager& CompNodeSyncManager::inst() {
    static std::mutex mtx;
    static std::unique_ptr<CompNodeSyncManager> inst;
    struct Guard final : CompNodeDepedentObject {
        std::shared_ptr<void> on_comp_node_finalize() override {
            MGB_LOCK_GUARD(mtx);
            inst.reset();
            return {};
        }
    };
    static std::optional<Guard> guard;
#ifndef WIN32
    static bool broken = false;
    static struct ForkGuard {
        ForkGuard() {
            mgb_assert(0 == pthread_atfork(NULL, NULL, [] {
                           if (inst) {
                               inst.release(); // deliberate leak, unfixable
                               broken = true;
                           }
                       }));
        }
    } fork_guard;
#endif
    MGB_LOCK_GUARD(mtx);
    if (!inst) {
#ifndef WIN32
        mgb_assert(!broken);
#endif
        EventPool::without_timer();
        inst.reset(new CompNodeSyncManager);
        guard.emplace();
    }
    return *inst;
}

} // namespace

uint64_t record_event(CompNode cn, bool doitnow) {
    return CompNodeSyncManager::inst().record(cn, doitnow);
}

void device_wait_event(CompNode waiter, CompNode waitee, uint64_t event) {
    CompNodeSyncManager::inst().device_wait(waiter, waitee, event);
}

void async_release(CompNode cn, uint64_t event, BlobPtr blob) {
    CompNodeSyncManager::inst().async_release(cn, event, std::move(blob));
}

void async_release(CompNode cn, uint64_t event, HostTensorStorage::RawStorage storage) {
    CompNodeSyncManager::inst().async_release(cn, event, std::move(storage));
}

void EventDeleter::operator()(CompNode::Event* event) {
    EventPool::without_timer().free(event);
}

namespace {
std::atomic_uint64_t next_blob_id = 0;
}

OwnedBlob::OwnedBlob(const DeviceTensorStorage& s)
        : Blob(s.comp_node(), s.size() + s.offset()),
          m_storage{s.raw_storage()},
          m_id{next_blob_id++} {
    BlobManager::inst()->register_blob(this);
}

OwnedBlob::OwnedBlob(CompNode cn, size_t sz)
        : Blob(cn, sz), m_storage{}, m_id{next_blob_id++} {
    BlobManager::inst()->register_blob(this);
}

OwnedBlob::~OwnedBlob() {
    BlobManager::inst()->unregister_blob(this);
}

const Blob::RawStorage& OwnedBlob::storage() {
    if (!m_storage && m_size) {
        BlobManager::inst()->alloc_with_defrag(this, m_size);
    }
    return m_storage;
}

BlobPtr OwnedBlob::borrow_to(CompNode cn) {
    return std::make_shared<BorrowedBlob>(
            cn, std::static_pointer_cast<OwnedBlob>(shared_from_this()));
}

bool OwnedBlob::storage_is_unique() {
    return m_storage.unique();
}

void* OwnedBlob::raw_ptr_not_for_readwrite() {
    return m_storage.get();
}
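
// BorrowedBlob lets one comp node reference storage owned by an OwnedBlob on
// another comp node: construction records an event on the owner's comp node, the
// first storage() call makes the borrower device-wait on it, and the destructor
// hands the owner back through async_release so it outlives pending work on the
// borrower.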
BorrowedBlob::BorrowedBlob(CompNode cn, std::shared_ptr<OwnedBlob> owner)
        : Blob(cn, owner->size()),
          m_owner(std::move(owner)),
          m_event(record_event(m_owner->comp_node(), true)) {}

BorrowedBlob::~BorrowedBlob() {
    async_release(m_comp_node, record_event(m_comp_node, true), std::move(m_owner));
}

const Blob::RawStorage& BorrowedBlob::storage() {
    {
        MGB_LOCK_GUARD(m_mtx);
        if (!m_initialized) {
            device_wait_event(m_comp_node, m_owner->comp_node(), m_event);
            m_initialized = true;
        }
    }
    return m_owner->storage();
}

BlobPtr BorrowedBlob::borrow_to(CompNode cn) {
    return std::make_shared<BorrowedBlob>(cn, m_owner);
}

bool BorrowedBlob::storage_is_unique() {
    return m_owner.unique() && m_owner->storage_is_unique();
}

void* BorrowedBlob::raw_ptr_not_for_readwrite() {
    return m_owner->raw_ptr_not_for_readwrite();
}

Tensor::Tensor(
        BlobPtr blob, const TensorLayout& layout, size_t offset, const HostTensorND& hv)
        : m_cn(blob->comp_node()),
          m_shape(layout),
          m_dtype(layout.dtype),
          m_layout(layout),
          m_blob(std::move(blob)),
          m_offset(offset),
          m_value(hv) {}
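
// constructing from a host tensor uploads it to the device; small values (at most
// TensorShape::MAX_NDIM elements) are additionally kept in m_value, presumably so
// that shape-like data can be read back without a device sync.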
Tensor::Tensor(const HostTensorND& hv) : Tensor(hv.layout(), hv.comp_node()) {
    constexpr int size_threshold = TensorShape::MAX_NDIM;
    size_t nr_elems = hv.layout().total_nr_elems();
    if (nr_elems <= size_threshold) {
        m_value = hv;
    }
    if (nr_elems) {
        MGB_RECORD_EVENT(
                profiler::HostToDeviceEvent, hv.layout(), hv.comp_node(), hv.raw_ptr(),
                dev_tensor().raw_ptr());
        dev_tensor(false).copy_from_fixlayout(hv);
        // even though hv is saved in m_value, Tensor itself could be
        // released before copy completes
        MGB_RECORD_EVENT(
                profiler::HostToDeviceFinishEvent, hv.layout(), hv.comp_node(),
                hv.raw_ptr(), dev_tensor().raw_ptr());
        async_release(hv);
    }
}

Tensor::Tensor(const DeviceTensorND& dv, const HostTensorND& hv)
        : m_offset(dv.storage().offset()),
          m_cn(dv.comp_node()),
          m_shape(dv.layout()),
          m_dtype(dv.layout().dtype),
          m_blob(Blob::make(dv.storage())),
          m_layout(dv.layout()) {
    if (!hv.empty()) {
        mgb_assert(dv.comp_node() == hv.comp_node());
        mgb_assert(dv.dtype() == hv.dtype());
        mgb_assert(dv.shape().eq_shape(hv.shape()));
        m_value = hv;
    }
}

Tensor::Tensor(const TensorLayout& layout, const CompNode& cn)
        : m_layout{layout},
          m_blob{Blob::make(cn, layout.span().dist_byte())},
          m_offset{0},
          m_cn(cn),
          m_shape(layout),
          m_dtype(layout.dtype) {}

Tensor::Tensor(const BlobPtr blob, const size_t offset, const TensorLayout& layout)
        : m_layout{layout},
          m_blob{blob},
          m_offset{offset},
          m_cn(blob->comp_node()),
          m_shape(layout),
          m_dtype(layout.dtype) {}

TensorPtr Tensor::make(const HostTensorND& hv) {
    auto&& blob = MultiCNConstTensorCache::inst().lookup(hv);
    if (blob) {
        return make(std::forward<decltype(blob)>(blob), hv.layout(), hv);
    }
    return std::make_shared<Tensor>(hv);
}
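
// if the current layout does not satisfy the caller-provided constraint, the data
// is copied into a freshly allocated contiguous blob and the layout/offset are
// rewritten in place.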
void Tensor::to_contiguous_inplace(VarNode::LayoutConstraintCallback& layout_checker) {
    MGB_LOCK_GUARD(m_blob_mtx);
    if (!m_layout.is_empty() && !layout_checker(m_layout)) {
        DeviceTensorStorage storage;
        storage.reset(m_cn, m_blob->size(), m_blob->storage());
        storage = storage.sub(m_offset);
        DeviceTensorND dv;
        dv.reset(storage, m_layout);
        DeviceTensorND dv_contig;
        dv_contig.copy_from(dv);
        m_layout = dv_contig.layout();
        std::atomic_store(&m_blob, BlobPtr(Blob::make(dv_contig.storage())));
        mgb_assert(m_layout.is_contiguous());
        m_offset = 0;
    }
}

void Tensor::to_contiguous_inplace() {
    static VarNode::LayoutConstraintCallback default_cb =
            [](const TensorLayout& layout) { return layout.is_contiguous(); };
    to_contiguous_inplace(default_cb);
}

void Tensor::assign_from_dev_tensor(DeviceTensorND dv) {
    MGB_LOCK_GUARD(m_blob_mtx);
    std::atomic_store(&m_blob, BlobPtr(Blob::make(dv.storage())));
    m_offset = dv.storage().offset();
    m_layout = dv.layout();
}

DeviceTensorND Tensor::dev_tensor(bool contiguous) {
    mgb_assert(m_blob, "uninitialized tensor.");
    if (contiguous) {
        to_contiguous_inplace();
    }
    MGB_LOCK_GUARD(m_blob_mtx);
    DeviceTensorStorage storage;
    storage.reset(m_cn, m_blob->size(), m_blob->storage());
    storage = storage.sub(m_offset);
    DeviceTensorND ret;
    ret.reset(storage, m_layout);
    return ret;
}

megdnn::TensorND Tensor::dnn_tensor() {
    mgb_assert(m_blob, "uninitialized tensor.");
    return {m_layout, {m_blob->storage().get(), m_offset}};
}
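
// m_value caches a host copy of the tensor; m_value_ready is the event recorded
// right after the device-to-host copy, so get_value() can host_wait on it and
// try_get_value() can answer without blocking.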
void Tensor::fetch_value() {
    MGB_LOCK_GUARD(m_value_mtx);
    if (m_value.empty()) {
        m_value.copy_from(dev_tensor(false));
        m_value_ready.reset(EventPool::without_timer().alloc(comp_node()));
        m_value_ready->record();
    }
}

bool Tensor::value_fetched() {
    MGB_LOCK_GUARD(m_value_mtx);
    return m_value.layout().ndim != 0;
}

const HostTensorND& Tensor::get_value() {
    fetch_value();
    if (m_value_ready) {
        m_value_ready->host_wait();
    }
    return m_value;
}

const HostTensorND* Tensor::try_get_value() {
    MGB_LOCK_GUARD(m_value_mtx);
    if (!m_value.empty() && (!m_value_ready || m_value_ready->finished())) {
        return &m_value;
    }
    return nullptr;
}

TensorPtr Tensor::make_scalar(DTypeScalar value, CompNode cn) {
    HostTensorND hv{cn, value.dtype()};
    hv.resize({1});
    memcpy(hv.raw_ptr(), value.storage(), value.dtype().size(1));
    return make(hv);
}

TensorPtr Tensor::sub(size_t offset, TensorShape shape) {
    TensorLayout layout(shape, m_dtype);
    return Tensor::make(m_blob, offset + m_offset, layout);
}

uint64_t Tensor::get_ready_event() {
    if (m_produced_at == 0) {
        m_produced_at = record_event(comp_node());
    }
    return m_produced_at;
}

bool Tensor::storage_is_unique() {
    return m_blob.unique() && m_blob->storage_is_unique();
}

void Tensor::static_initialize() {
    EventPool::with_timer();
    EventPool::without_timer();
    MultiCNConstTensorCache::inst();
}

} // namespace imperative
} // namespace mgb

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}