
network.cpp

#include "lite/network.h"
#include "function_base.h"
#include "network_impl_base.h"
#include "parse_info/parse_info_base.h"
#include "parse_model/model_parser.h"
#include "type_info.h"
#if LITE_BUILD_WITH_MGE
#include "mge/function_dft.h"
#include "mge/network_impl.h"
#endif

#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <memory>
#include <unordered_map>

using namespace lite;

/**
 * \brief Construct the new network implement
 * the order must be:
 * 1. create the implement
 * 2. config and load
 * 3. set_io
 */
Network::Network(const Config& config, const NetworkIO& network_io) {
    LITE_ERROR_HANDLER_BEGIN
    m_config = config;
    m_network_io = network_io;
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        m_impl = call_func<
                NetworkImplDft, std::unique_ptr<lite::Network::NetworkImplBase>>(
                "create_network");
    }
    m_impl->set_config(config);
    m_impl->set_io(network_io);
    LITE_ERROR_HANDLER_END
}

Network::Network(const NetworkIO& network_io, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    m_config = config;
    m_network_io = network_io;
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        m_impl = call_func<
                NetworkImplDft, std::unique_ptr<lite::Network::NetworkImplBase>>(
                "create_network");
    }
    m_impl->set_config(config);
    m_impl->set_io(network_io);
    LITE_ERROR_HANDLER_END
}
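
/**
 * A minimal usage sketch of the order described above, seen from the user
 * side (illustrative only; the model path is a placeholder):
 *
 *     lite::Config config;
 *     lite::NetworkIO io;
 *     auto network = std::make_shared<lite::Network>(config, io);
 *     network->load_model("model.mge");
 *     auto input = network->get_input_tensor(0);  // fill input data here
 *     network->forward();
 *     network->wait();
 *     auto output = network->get_output_tensor(0);
 */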

void Network::load_model(void* model_mem, size_t size) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    //! this model_mem is managed by the user
    std::shared_ptr<void> model{model_mem, [](void*) {}};
    prase_model(model, size);
    LITE_ERROR_HANDLER_END
}

void Network::load_model(std::string model_path) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    FILE* fin = fopen(model_path.c_str(), "rb");
    LITE_ASSERT(fin, "failed to open %s: %s", model_path.c_str(), strerror(errno));
    fseek(fin, 0, SEEK_END);
    size_t size = ftell(fin);
    fseek(fin, 0, SEEK_SET);
    void* ptr = malloc(size);
    std::shared_ptr<void> buf{ptr, ::free};
    auto nr = fread(buf.get(), 1, size, fin);
    LITE_ASSERT(nr == size);
    fclose(fin);
    prase_model(buf, size);
    LITE_ERROR_HANDLER_END
}
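
//! parse the model: first read the model info section (which may switch the
//! backend implementation and override the config and IO), then decrypt and
//! parse the model body and hand it to the implementation.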
void Network::prase_model(std::shared_ptr<void> model_data, size_t size) {
    std::unordered_map<std::string, LiteAny> separate_config_map;
    ModelParser model_parser(model_data, size);
    //! parse the model info
    if (model_parser.parse_model_info(
                m_config, m_network_io, separate_config_map, m_extra_info,
                !m_extra_config.disable_configure_by_model_info)) {
        if (m_config.backend == LiteBackend::LITE_DEFAULT &&
            m_impl->get_backend_type() != LiteBackend::LITE_DEFAULT) {
            m_impl.reset(try_call_func<NetworkImplDft, lite::Network::NetworkImplBase*>(
                    "parse_model"));
        }
        if (!m_extra_config.disable_configure_by_model_info) {
            m_impl->set_config(m_config);
            m_impl->set_io(m_network_io);
        }
    }
    //! decrypt the model
    size_t model_length;
    auto&& model_shared_ptr = model_parser.parse_model(model_length, m_config);
    m_impl->load_model(model_shared_ptr, model_length, separate_config_map);
    m_loaded = true;
    update_from_implement();
}

Network::~Network() = default;
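
//! sync the fields that the implementation may have changed during load
//! (currently only the device type).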
void Network::update_from_implement() {
    m_config.device_type = m_impl->get_device_type();
}

void Network::compute_only_configured_output() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(
            !m_loaded,
            "compute_only_configured_output should be used before model "
            "loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->compute_only_configured_output();
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_io_tensor(
        std::string name, LiteTensorPhase phase) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_io_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_io_tensor(name, phase);
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_input_tensor(size_t index) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_input_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_input_tensor(index);
    LITE_ERROR_HANDLER_END
}

std::shared_ptr<Tensor> Network::get_output_tensor(size_t index) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_output_tensor should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_output_tensor(index);
    LITE_ERROR_HANDLER_END
}

Network& Network::set_async_callback(const AsyncCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(
            !m_config.options.force_output_use_user_specified_memory,
            "Async mode can't run with force_output_use_user_specified_memory, "
            "in which output data is written to user-specified memory.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_async_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_start_callback(const StartCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_start_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_finish_callback(const FinishCallback& callback) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_finish_callback(std::move(callback));
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_device_id(int device_id) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(!m_loaded, "set_device_id should be used before model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_device_id(device_id);
    return *this;
    LITE_ERROR_HANDLER_END
}

Network& Network::set_stream_id(int stream_id) {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(!m_loaded, "set_stream_id should be used before model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->set_stream_id(stream_id);
    return *this;
    LITE_ERROR_HANDLER_END
}

void Network::forward() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "forward should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl.get());
    m_impl->forward();
    LITE_ERROR_HANDLER_END
}

void Network::wait() {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "wait should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    m_impl->wait();
    LITE_ERROR_HANDLER_END
}

std::string Network::get_input_name(size_t index) const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_input_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_input_name(index);
    LITE_ERROR_HANDLER_END
}

std::string Network::get_output_name(size_t index) const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_output_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_output_name(index);
    LITE_ERROR_HANDLER_END
}

std::vector<std::string> Network::get_all_input_name() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_all_input_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    auto all_input_name = m_impl->get_all_input_name();
    std::vector<std::string> all_names;
    for (auto& name : all_input_name) {
        all_names.push_back(name);
    }
    return all_names;
    LITE_ERROR_HANDLER_END
}

std::vector<std::string> Network::get_all_output_name() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_ASSERT(m_loaded, "get_all_output_name should be used after model loaded.");
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    auto all_output_name = m_impl->get_all_output_name();
    std::vector<std::string> all_names;
    for (auto& name : all_output_name) {
        all_names.push_back(name);
    }
    return all_names;
    LITE_ERROR_HANDLER_END
}

int Network::get_device_id() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_device_id();
    LITE_ERROR_HANDLER_END
}

int Network::get_stream_id() const {
    LITE_ERROR_HANDLER_BEGIN
    LITE_CHECK_NON_NULL_POINTER(m_impl);
    return m_impl->get_stream_id();
    LITE_ERROR_HANDLER_END
}

void Network::enable_profile_performance(std::string profile_file_path) {
    LITE_ERROR_HANDLER_BEGIN
    m_impl->enable_profile_performance(profile_file_path);
    LITE_ERROR_HANDLER_END
}

const std::string& Network::get_model_extra_info() {
    LITE_ERROR_HANDLER_BEGIN
    return m_extra_info;
    LITE_ERROR_HANDLER_END
}

LiteDeviceType Network::get_device_type() const {
    LITE_ERROR_HANDLER_BEGIN
    return m_impl->get_device_type();
    LITE_ERROR_HANDLER_END
}
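
//! dump the static memory allocation information of the loaded model into
//! log_dir; only available when built with JSON support and outside the TEE
//! environment.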
void Network::get_static_memory_alloc_info(const std::string& log_dir) const {
    LITE_ERROR_HANDLER_BEGIN
#ifndef __IN_TEE_ENV__
#if MGB_ENABLE_JSON
    LITE_ASSERT(
            m_loaded,
            "get_static_memory_alloc_info should be used after model loaded.");
    m_impl->get_static_memory_alloc_info(log_dir);
    return;
#endif
#endif
    LITE_MARK_USED_VAR(log_dir);
    LITE_THROW("Doesn't support get_static_memory_alloc_info(). Please check macro.");
    LITE_ERROR_HANDLER_END
}
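
//! apply the extra configuration; disabling configure-by-model-info is only
//! allowed before the model is loaded.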
void Network::extra_configure(const ExtraConfig& extra_config) {
    LITE_ERROR_HANDLER_BEGIN
    if (!extra_config.disable_configure_by_model_info) {
        LITE_ASSERT(
                !m_loaded,
                "disable_configure_by_model_info should be configured before model "
                "loaded.");
    }
    m_extra_config = extra_config;
    LITE_ERROR_HANDLER_END
}

/*********************** MGE special network function ***************/
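
//! The Runtime helpers below only work with the default (MegEngine) backend:
//! each one forwards to NetworkImplDft through call_func and throws for any
//! other backend.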

void Runtime::set_cpu_threads_number(
        std::shared_ptr<Network> network, size_t nr_threads) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_cpu_threads_number should be used before model loaded.");
        call_func<NetworkImplDft, void>(
                "set_cpu_threads_number", network_impl, nr_threads);
        return;
    }
    LITE_THROW("set_cpu_threads_number is not available in the backend.");
    LITE_ERROR_HANDLER_END
}
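
/**
 * A minimal sketch of how the CPU runtime helpers are ordered around loading
 * (illustrative only; the model path is a placeholder and the affinity
 * callback is assumed to receive the worker thread id):
 *
 *     lite::Config config;
 *     lite::NetworkIO io;
 *     auto network = std::make_shared<lite::Network>(config, io);
 *     lite::Runtime::set_cpu_threads_number(network, 4);  // before load
 *     network->load_model("model.mge");
 *     lite::Runtime::set_runtime_thread_affinity(network, [](int thread_id) {
 *         // pin thread_id to a core here
 *     });  // after load
 */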

void Runtime::use_tensorrt(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "use_tensorrt should be used before model loaded.");
        call_func<NetworkImplDft, void>("use_tensorrt", network_impl);
        return;
    }
    LITE_THROW("use_tensorrt is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

size_t Runtime::get_cpu_threads_number(const std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, size_t>(
                "get_cpu_threads_number", network_impl);
    }
    LITE_THROW("get_cpu_threads_number is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::set_runtime_thread_affinity(
        std::shared_ptr<Network> network,
        const ThreadAffinityCallback& thread_affinity_callback) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "set_runtime_thread_affinity should be used after model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "set_runtime_thread_affinity", network_impl, thread_affinity_callback);
        return;
    }
    LITE_THROW("set_runtime_thread_affinity is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::set_cpu_inplace_mode(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_cpu_inplace_mode should be used before model loaded.");
        call_func<NetworkImplDft, void>("set_cpu_inplace_mode", network_impl);
        return;
    }
    LITE_THROW("set_cpu_inplace_mode is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

bool Runtime::is_cpu_inplace_mode(const std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, bool>("is_cpu_inplace_mode", network_impl);
    }
    LITE_THROW("is_cpu_inplace_mode is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

//! set opr algorithm selection strategy in the network
void Runtime::set_network_algo_policy(
        std::shared_ptr<Network> network, LiteAlgoSelectStrategy strategy,
        uint32_t shared_batch_size, bool binary_equal_between_batch) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "set_network_algo_policy", network_impl, strategy, shared_batch_size,
                binary_equal_between_batch);
        return;
    }
    LITE_THROW("set_network_algo_policy is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

//! set the algorithm workspace limit in the network
void Runtime::set_network_algo_workspace_limit(
        std::shared_ptr<Network> network, size_t workspace_limit) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "set_network_algo_workspace_limit should be used after model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "set_network_algo_workspace_limit", network_impl, workspace_limit);
        return;
    }
    LITE_THROW(
            "set_network_algo_workspace_limit is not available in the "
            "backend.");
    LITE_ERROR_HANDLER_END
}

//! set the network memory allocator, the allocator is defined by the user
void Runtime::set_memory_allocator(
        std::shared_ptr<Network> network, std::shared_ptr<Allocator> user_allocator) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "set_memory_allocator should be used before model loaded.");
        call_func<NetworkImplDft, void>(
                "set_memory_allocator", network_impl, user_allocator);
        return;
    }
    LITE_THROW("set_memory_allocator is not available in the backend.");
    LITE_ERROR_HANDLER_END
}
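
//! let dst_network share the runtime memory of src_network; must be called
//! before dst_network loads its model.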
void Runtime::share_runtime_memory_with(
        std::shared_ptr<Network> dst_network, std::shared_ptr<Network> src_network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl_dst = NetworkHelper::implement(dst_network);
    if (network_impl_dst->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(dst_network),
                "share_runtime_memory_with should be used before model "
                "loaded.");
        call_func<NetworkImplDft, void>(
                "share_runtime_memory_with", network_impl_dst,
                NetworkHelper::implement(src_network));
        return;
    }
    LITE_THROW("share_runtime_memory_with is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::enable_io_txt_dump(
        std::shared_ptr<Network> network, std::string io_txt_out_file) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "enable_io_txt_dump", network_impl, io_txt_out_file);
        return;
    }
    LITE_THROW("enable_io_txt_dump is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::enable_io_bin_dump(
        std::shared_ptr<Network> network, std::string io_bin_out_dir) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        call_func<NetworkImplDft, void>(
                "enable_io_bin_dump", network_impl, io_bin_out_dir);
        return;
    }
    LITE_THROW("enable_io_bin_dump is not available in the backend.");
    LITE_ERROR_HANDLER_END
}
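
//! let dst_network share the weights of an already loaded src_network; after
//! this call dst_network is marked as loaded as well.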
void Runtime::shared_weight_with_network(
        std::shared_ptr<Network> dst_network,
        const std::shared_ptr<Network> src_network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl_dst = NetworkHelper::implement(dst_network);
    if (network_impl_dst->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(src_network),
                "shared_weight_with_network should be used after the src "
                "network loaded.");
        auto src_implement = NetworkHelper::implement(src_network);
        call_func<NetworkImplDft, void>(
                "shared_weight_with", network_impl_dst, src_implement);
        NetworkHelper::loaded(dst_network, true);
        return;
    }
    LITE_THROW("shared_weight_with_network is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::enable_global_layout_transform(std::shared_ptr<Network> network) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                !NetworkHelper::loaded(network),
                "enable_global_layout_transform should be used before model loaded.");
        call_func<NetworkImplDft, void>("enable_global_layout_transform", network_impl);
        return;
    }
    LITE_THROW("enable_global_layout_transform is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

void Runtime::dump_layout_transform_model(
        std::shared_ptr<Network> network, std::string optimized_model_path) {
    LITE_ERROR_HANDLER_BEGIN
    auto network_impl = NetworkHelper::implement(network);
    if (network_impl->get_backend_type() == LiteBackend::LITE_DEFAULT) {
        LITE_ASSERT(
                NetworkHelper::loaded(network),
                "dump_layout_transform_model should be used after model loaded.");
        call_func<NetworkImplDft, void>(
                "dump_layout_transform_model", network_impl, optimized_model_path);
        return;
    }
    LITE_THROW("dump_layout_transform_model is not available in the backend.");
    LITE_ERROR_HANDLER_END
}
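
//! get the IO information of a model, either from a model file or from an
//! in-memory model, without loading it into a Network here.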
NetworkIO Runtime::get_model_io_info(
        const std::string& model_path, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, NetworkIO>(
                "get_model_io_info", model_path, config);
    }
    LITE_THROW("get_model_io_info is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

NetworkIO Runtime::get_model_io_info(
        const void* model_mem, size_t size, const Config& config) {
    LITE_ERROR_HANDLER_BEGIN
    if (config.backend == LiteBackend::LITE_DEFAULT) {
        return call_func<NetworkImplDft, NetworkIO>(
                "get_model_io_info", model_mem, size, config);
    }
    LITE_THROW("get_model_io_info is not available in the backend.");
    LITE_ERROR_HANDLER_END
}

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}