
test_network_c.cpp

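// Unit tests for the Lite C API (lite-c/network_c.h, lite-c/tensor_c.h): each
// test loads a small model (mostly ./shufflenet.mge), drives it through the C
// interface, and compares the output against a reference result computed with
// the C++ API via mgb_lar().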
#include "../src/misc.h"
#if LITE_BUILD_WITH_MGE
#include "../src/common.h"
#include "../src/mge/network_impl.h"

#include "../lite-c/src/common.h"
#include "lite-c/global_c.h"
#include "lite-c/network_c.h"
#include "lite-c/tensor_c.h"

#include "./test_common.h"
#include "megbrain/tensor.h"

#include <string.h>

#include <atomic>
#include <chrono>
#include <cmath>
#include <iostream>
#include <memory>
#include <random>
#include <thread>
#include <unordered_map>
#include <vector>
namespace {

int affinity_set = false;
int single_thread_affinity(int) {
    affinity_set = true;
    return 0;
}

std::atomic_size_t m_nr_left{0};
std::atomic_size_t m_nr_allocated{0};

void* allocate(LiteDeviceType device, int, size_t size, size_t align) {
    LITE_ASSERT(device == LiteDeviceType::LITE_CPU);
    m_nr_left++;
    m_nr_allocated++;
#ifdef WIN32
    return _aligned_malloc(size, align);
#elif defined(__ANDROID__) || defined(ANDROID)
    return memalign(align, size);
#else
    void* ptr = nullptr;
    auto err = posix_memalign(&ptr, align, size);
    mgb_assert(!err, "failed to malloc %zu bytes with align %zu", size, align);
    return ptr;
#endif
}

void free(LiteDeviceType device, int, void* ptr) {
    m_nr_left--;
    LITE_ASSERT(device == LiteDeviceType::LITE_CPU);
#ifdef WIN32
    _aligned_free(ptr);
#else
    ::free(ptr);
#endif
};

#define NUMBER_THREDS (4)
std::vector<std::thread::id> thread_ids(NUMBER_THREDS);
int multi_thread_affinity(int id) {
    thread_ids[id] = std::this_thread::get_id();
    return 0;
};

volatile bool finished = false;
int async_callback() {
    finished = true;
    return 0;
}

volatile bool finished_with_data = false;
int async_callback_with_data(void* user_data) {
    if (user_data != NULL) {
        std::cout << "async_callback user_data addr=" << std::hex << user_data
                  << std::endl;
    }
    finished_with_data = true;
    return 0;
}

volatile bool start_checked = false;
int start_callback(const LiteIO* inputs, const LiteTensor* input_tensors, size_t size) {
    start_checked = true;
    auto check_func = [&]() {
        ASSERT_EQ(size, 1);
        ASSERT_EQ(std::string(inputs->name), "data");
        LiteLayout layout;
        LITE_get_tensor_layout(*input_tensors, &layout);
        ASSERT_EQ(layout.ndim, 4);
        ASSERT_EQ(layout.shapes[1], 3);
        ASSERT_EQ(layout.shapes[2], 224);
        ASSERT_EQ(layout.shapes[3], 224);
    };
    check_func();
    return 0;
}

volatile bool start_checked_with_data = false;
int start_callback_with_data(
        const LiteIO* inputs, const LiteTensor* input_tensors, size_t size,
        void* user_data) {
    start_checked_with_data = true;
    auto check_func = [&]() {
        if (user_data != NULL) {
            std::cout << "start_callback user_data addr=" << std::hex << user_data
                      << std::endl;
        }
        ASSERT_EQ(size, 1);
        ASSERT_EQ(std::string(inputs->name), "data");
        LiteLayout layout;
        LITE_get_tensor_layout(*input_tensors, &layout);
        ASSERT_EQ(layout.ndim, 4);
        ASSERT_EQ(layout.shapes[1], 3);
        ASSERT_EQ(layout.shapes[2], 224);
        ASSERT_EQ(layout.shapes[3], 224);
    };
    check_func();
    return 0;
}

volatile bool finish_checked = false;
int finish_callback(
        const LiteIO* outputs, const LiteTensor* output_tensors, size_t size) {
    finish_checked = true;
    auto check_func = [&]() {
        ASSERT_EQ(size, 1);
        ASSERT_EQ(
                std::string(outputs->name),
                "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
        LiteLayout layout;
        LITE_get_tensor_layout(*output_tensors, &layout);
        ASSERT_EQ(layout.shapes[1], 1000);
    };
    check_func();
    return 0;
}

volatile bool finish_checked_with_data = false;
int finish_callback_with_data(
        const LiteIO* outputs, const LiteTensor* output_tensors, size_t size,
        void* user_data) {
    finish_checked_with_data = true;
    auto check_func = [&]() {
        if (user_data != NULL) {
            std::cout << "finish_callback user_data addr=" << std::hex << user_data
                      << std::endl;
        }
        ASSERT_EQ(size, 1);
        ASSERT_EQ(
                std::string(outputs->name),
                "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
        LiteLayout layout;
        LITE_get_tensor_layout(*output_tensors, &layout);
        ASSERT_EQ(layout.shapes[1], 1000);
    };
    check_func();
    return 0;
}

}  // namespace
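// The macros below factor out the steps shared by most tests: ForwardMgb runs
// shufflenet.mge through the C++ API to produce a reference result, the
// Make/Load/SetInput/Forward/GetOutput macros drive the same model through the
// C API, and CompareResult checks the two outputs element-wise.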
#define LITE_CAPI_CHECK(_expr)                 \
    do {                                       \
        int _ret = (_expr);                    \
        if (_ret) {                            \
            LITE_THROW(LITE_get_last_error()); \
        }                                      \
    } while (0)

#define ForwardMgb                                                             \
    lite::Config config;                                                       \
    auto lite_tensor = lite::get_input_data("./input_data.npy");               \
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte(); \
    std::string model_path = "./shufflenet.mge";                               \
    auto result_mgb = mgb_lar(model_path, config, "data", lite_tensor)

#define MakeNetwork       \
    LiteNetwork c_network; \
    LITE_CAPI_CHECK(       \
            LITE_make_network(&c_network, *default_config(), *default_network_io()))

#define LoadNetwork \
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_path.c_str()))

#define SetInput                                                                 \
    LiteTensor c_input_tensor, c_output_tensor;                                  \
    LITE_CAPI_CHECK(                                                             \
            LITE_get_io_tensor(c_network, "data", LITE_INPUT, &c_input_tensor)); \
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(                                    \
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte))

#define ForwardNetwork                        \
    LITE_CAPI_CHECK(LITE_forward(c_network)); \
    LITE_CAPI_CHECK(LITE_wait(c_network))

#define GetOutput                                                      \
    const char* output_name;                                           \
    LITE_CAPI_CHECK(LITE_get_output_name(c_network, 0, &output_name)); \
    LITE_CAPI_CHECK(LITE_get_io_tensor(                                \
            c_network, output_name, LITE_OUTPUT, &c_output_tensor));   \
    void* output_ptr;                                                  \
    LITE_CAPI_CHECK(LITE_get_tensor_memory(c_output_tensor, &output_ptr))

#define CompareResult                                 \
    EXPECT_TRUE(lite::compare_memory<float>(          \
            output_ptr, result_mgb->get_memory_ptr(), \
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)))
TEST(TestCapiNetWork, BasicResetInput) {
    ForwardMgb;
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_destroy_network(c_network);
}

TEST(TestCapiNetWork, GetAllName) {
    std::string model_path = "./shufflenet.mge";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    ASSERT_EQ(input_names.size(), 1);
    ASSERT_TRUE(std::string(input_names[0]) == "data");
    std::vector<const char*> output_names(output_size);
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
    ASSERT_EQ(output_names.size(), 1);
    LITE_destroy_network(c_network);
}

TEST(TestCapiNetWork, GetAllNameAhead) {
    std::string model_path = "./shufflenet.mge";
    LiteNetworkIO ios, ios_mem;
    LITE_CAPI_CHECK(LITE_get_model_io_info_by_path(
            model_path.c_str(), *default_config(), &ios));
    FILE* fin = fopen(model_path.c_str(), "rb");
    ASSERT_TRUE(fin);
    fseek(fin, 0, SEEK_END);
    size_t size = ftell(fin);
    fseek(fin, 0, SEEK_SET);
    void* ptr = malloc(size);
    std::shared_ptr<void> buf{ptr, ::free};
    auto nr = fread(buf.get(), 1, size, fin);
    LITE_ASSERT(nr == size);
    fclose(fin);
    LITE_CAPI_CHECK(
            LITE_get_model_io_info_by_memory(ptr, size, *default_config(), &ios_mem));
    ASSERT_EQ(ios.input_size, 1);
    ASSERT_EQ(ios.output_size, 1);
    ASSERT_EQ(ios_mem.input_size, 1);
    ASSERT_EQ(ios_mem.output_size, 1);
    ASSERT_TRUE(std::string(ios.inputs->name) == "data");
    ASSERT_TRUE(ios.inputs->config_layout.ndim == 4);
    ASSERT_TRUE(ios.inputs->config_layout.shapes[1] == 3);
    ASSERT_TRUE(ios.inputs->config_layout.shapes[2] == 224);
    ASSERT_TRUE(ios.inputs->config_layout.shapes[3] == 224);
    ASSERT_TRUE(
            std::string(ios.outputs->name) ==
            "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
    ASSERT_TRUE(ios.outputs->config_layout.ndim == 2);
    ASSERT_TRUE(ios.outputs->config_layout.shapes[0] == 1);
    ASSERT_TRUE(ios.outputs->config_layout.shapes[1] == 1000);
    ASSERT_TRUE(std::string(ios_mem.inputs->name) == "data");
    ASSERT_TRUE(ios_mem.inputs->config_layout.ndim == 4);
    ASSERT_TRUE(ios_mem.inputs->config_layout.shapes[1] == 3);
    ASSERT_TRUE(ios_mem.inputs->config_layout.shapes[2] == 224);
    ASSERT_TRUE(ios_mem.inputs->config_layout.shapes[3] == 224);
    ASSERT_TRUE(
            std::string(ios_mem.outputs->name) ==
            "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
    ASSERT_TRUE(ios_mem.outputs->config_layout.ndim == 2);
    ASSERT_TRUE(ios_mem.outputs->config_layout.shapes[0] == 1);
    ASSERT_TRUE(ios_mem.outputs->config_layout.shapes[1] == 1000);
}
TEST(TestCapiNetWork, Discrete_Input) {
    std::vector<std::shared_ptr<lite::Tensor>> datas;
    datas.push_back(lite::get_input_data("./data0.npy"));
    datas.push_back(lite::get_input_data("./data1.npy"));
    datas.push_back(lite::get_input_data("./data2.npy"));
    size_t data_length_in_byte = datas[0]->get_tensor_total_size_in_byte();
    LiteIO input_io = default_io;
    input_io.is_host = true;
    input_io.name = "data";
    LiteLayout d_ly;
    d_ly.ndim = 4;
    d_ly.data_type = LiteDataType::LITE_FLOAT;
    std::vector<size_t> input_shape = {3, 3, 224, 224};
    for (size_t i = 0; i < d_ly.ndim; i++) {
        d_ly.shapes[i] = input_shape[i];
    }
    input_io.config_layout = d_ly;
    LiteNetworkIO network_io = *default_network_io();
    network_io.inputs = &input_io;
    network_io.input_size = 1;
    LiteConfig c_config = *default_config();
    c_config.discrete_input_name = "data";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, network_io));
    std::string model_path = "./test_discrete_input.mge";
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_path.c_str()));
    std::vector<LiteTensor> c_data_tensors(3, nullptr);
    for (size_t i = 0; i < 3; i++) {
        LITE_CAPI_CHECK(LITE_get_io_tensors(
                c_network, "data", i, LITE_INPUT, &c_data_tensors[i]));
        LITE_CAPI_CHECK(LITE_reset_tensor_memory(
                c_data_tensors[i], datas[i]->get_memory_ptr(), data_length_in_byte));
    }
    ForwardNetwork;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
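// The tests in the following block target the RockChip NPU backend and are
// only compiled when LITE_BUILD_WITH_RKNPU is enabled.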
#if LITE_BUILD_WITH_RKNPU

//! select the topNum largest probabilities from pfProb, writing the values to
//! pfMaxProb and the corresponding class indices to pMaxClass
static int GetTop(
        float* pfProb, float* pfMaxProb, uint32_t* pMaxClass, uint32_t outputCount,
        uint32_t topNum) {
    uint32_t i, j;
#define MAX_TOP_NUM 20
    if (topNum > MAX_TOP_NUM)
        return 0;
    memset(pfMaxProb, 0, sizeof(float) * topNum);
    memset(pMaxClass, 0xff, sizeof(float) * topNum);
    for (j = 0; j < topNum; j++) {
        for (i = 0; i < outputCount; i++) {
            if ((i == *(pMaxClass + 0)) || (i == *(pMaxClass + 1)) ||
                (i == *(pMaxClass + 2)) || (i == *(pMaxClass + 3)) ||
                (i == *(pMaxClass + 4))) {
                continue;
            }
            if (pfProb[i] > *(pfMaxProb + j)) {
                *(pfMaxProb + j) = pfProb[i];
                *(pMaxClass + j) = i;
            }
        }
    }
    return 1;
}
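// rknntest_set_info: loads mobilenet_v1.rknn, inspects the input/output tensor
// attributes, sets the input "type"/"fmt" information, requests a float
// output, then checks the top-5 classes and probabilities against the
// reference output in output_data.npy.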
TEST(TestCapiNetWork, rknntest_set_info) {
#define SET_INFO_SIZE 2
#define TENSOR_TYPE_UINT8 3
#define TENSOR_FORMAT_NHWC 1
    LiteConfig config;
    config.backend = LiteBackend::LITE_RK_NPU;
    config.device_type = LiteDeviceType::LITE_NPU;
    config.bare_model_cryption_name = nullptr;
    auto lite_tensor = lite::get_input_data("./model/cat_224x224.npy");
    auto true_tensor = lite::get_input_data("./output_data.npy");
    auto rknn_model = "./model/mobilenet_v1.rknn";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network_config(&c_network, config));
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, rknn_model));
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    std::vector<const char*> output_names(output_size);
    LiteTensor c_input_tensor, c_output_tensor;
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, input_names[0], LITE_IO, &c_input_tensor));
    size_t input_length = 0;
    LITE_get_tensor_total_size_in_byte(c_input_tensor, &input_length);
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    {
        LiteLayout input_layout;
        LITE_get_tensor_layout(c_input_tensor, &input_layout);
        ASSERT_TRUE(input_layout.data_type == LITE_INT8);
        std::vector<int> input_shape = {1, 224, 224, 3};
        for (size_t i = 0; i < input_layout.ndim; i++) {
            ASSERT_TRUE(input_layout.shapes[i] == input_shape[i]);
        }
    }
    {
        int size_attr = 0;
        LITE_CAPI_CHECK(LITE_get_tensor_attribute(
                c_input_tensor, nullptr, nullptr, &size_attr));
        ASSERT_TRUE(size_attr > 0);
        const char* keys[size_attr];
        void* values[size_attr];
        LITE_CAPI_CHECK(
                LITE_get_tensor_attribute(c_input_tensor, keys, values, &size_attr));
        ASSERT_TRUE(size_attr > 5);
        std::unordered_map<std::string, uint32_t> result_map = {
                {"zp", 0},          {"index", 0},       {"size_with_stride", 150528},
                {"stride", 224},    {"n_size", 150528}, {"n_elems", 150528},
                {"qnt_type", 2},    {"n_dims", 4},      {"type", 2},
                {"fmt", 1},         {"dims0", 1},       {"dims1", 224},
                {"dims2", 224},     {"dims3", 3},
        };
        for (int i = 0; i < size_attr; i++) {
            std::string key(keys[i]);
            if (key == "names") {
                ASSERT_TRUE(
                        std::string("input") ==
                        std::string(static_cast<const char*>(values[i])));
            } else if (key == "scale") {
                float scale = *static_cast<float*>(values[i]);
                ASSERT_TRUE(std::fabs(scale - 0.007812) < 0.00001);
            } else if (key == "fl" || key == "pass_through") {
                uint8_t val = *static_cast<uint8_t*>(values[i]);
                if (key == "fl") {
                    ASSERT_TRUE(val == 0);
                } else {
                    ASSERT_TRUE(val == 1);
                }
            } else {
                uint32_t val = *static_cast<uint32_t*>(values[i]);
                ASSERT_TRUE(result_map[std::string(keys[i])] == val);
            }
        }
    }
    const char* keys[] = {"type", "fmt"};
    int info_size = SET_INFO_SIZE;
    int type = TENSOR_TYPE_UINT8;
    int fmt = TENSOR_FORMAT_NHWC;
    void* values[] = {static_cast<void*>(&type), static_cast<void*>(&fmt)};
    LITE_CAPI_CHECK(
            LITE_set_tensor_information(c_input_tensor, keys, values, info_size));
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            std::string("MobilenetV1/Predictions/Reshape_1"));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    // LiteLayout tmp_output_layout;
    // LITE_get_tensor_layout(c_output_tensor, &tmp_output_layout);
    // tmp_output_layout.data_type = LiteDataType::LITE_FLOAT;
    // LITE_set_tensor_layout(c_output_tensor, tmp_output_layout);
    {
        const char* keys[] = {"want_float"};
        uint8_t want_float = 1;
        void* values[] = {static_cast<void*>(&want_float)};
        LITE_CAPI_CHECK(LITE_set_tensor_information(c_output_tensor, keys, values, 1));
    }
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        LiteLayout output_layout;
        LITE_get_tensor_layout(c_output_tensor, &output_layout);
        ASSERT_TRUE(output_layout.data_type == LITE_FLOAT);
        int size_attr = 0;
        LITE_CAPI_CHECK(LITE_get_tensor_attribute(
                c_output_tensor, nullptr, nullptr, &size_attr));
        ASSERT_TRUE(size_attr > 0);
        const char* keys[size_attr];
        void* values[size_attr];
        LITE_CAPI_CHECK(
                LITE_get_tensor_attribute(c_output_tensor, keys, values, &size_attr));
        ASSERT_TRUE(size_attr > 5);
        std::unordered_map<std::string, uint32_t> result_map = {
                {"zp", 0},        {"index", 0},     {"size_with_stride", 2002},
                {"stride", 0},    {"n_size", 2002}, {"n_elems", 1001},
                {"qnt_type", 2},  {"n_dims", 2},    {"type", 0},
                {"fmt", 2},       {"dims0", 1},     {"dims1", 1001},
        };
        for (int i = 0; i < size_attr; i++) {
            std::string key(keys[i]);
            if (key == "names") {
                ASSERT_TRUE(
                        "MobilenetV1/Predictions/Reshape_1" ==
                        std::string(static_cast<const char*>(values[i])));
            } else if (key == "scale") {
                float scale = *static_cast<float*>(values[i]);
                ASSERT_TRUE(std::fabs(scale - 1.0) < 0.00001);
            } else if (key == "fl" || key == "pass_through") {
                uint8_t val = *static_cast<uint8_t*>(values[i]);
                ASSERT_TRUE(val == 0);
            } else {
                uint32_t val = *static_cast<uint32_t*>(values[i]);
                ASSERT_TRUE(result_map[std::string(keys[i])] == val);
            }
        }
    }
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<uint32_t> result_class = {286, 464, 282, 357, 285};
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069};
        for (int i = 0; i < 5; i++) {
            ASSERT_TRUE(result_class[i] == MaxClass[i]);
            ASSERT_TRUE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    {
        float* true_data = static_cast<float*>(true_tensor->get_memory_ptr());
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* data1 = static_cast<float*>(output_ptr);
        size_t length = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        for (size_t i = 0; i < length; i++) {
            ASSERT_LT(std::abs(data1[i] - true_data[i]), 1e-3);
        }
    }
    LITE_destroy_network(c_network);
#undef SET_INFO_SIZE
#undef TENSOR_FORMAT_NHWC
#undef TENSOR_TYPE_UINT8
}
TEST(TestCapiNetWork, rknntest_set_info_two_input) {
#define SET_INFO_SIZE 2
#define TENSOR_TYPE_UINT8 3
#define TENSOR_FORMAT_NHWC 1
    LiteConfig config;
    config.backend = LiteBackend::LITE_RK_NPU;
    config.device_type = LiteDeviceType::LITE_NPU;
    config.bare_model_cryption_name = nullptr;
    auto lite_tensor = lite::get_input_data("./model/cat_224x224.npy");
    auto lite_tensor_dog = lite::get_input_data("./model/dog_224x224.npy");
    auto true_tensor = lite::get_input_data("./output_data.npy");
    auto rknn_model = "./model/mobilenet_v1.rknn";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network_config(&c_network, config));
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, rknn_model));
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    std::vector<const char*> output_names(output_size);
    LiteTensor c_input_tensor, c_output_tensor;
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, input_names[0], LITE_IO, &c_input_tensor));
    size_t input_length = 0;
    LITE_get_tensor_total_size_in_byte(c_input_tensor, &input_length);
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    {
        LiteLayout input_layout;
        LITE_get_tensor_layout(c_input_tensor, &input_layout);
        ASSERT_TRUE(input_layout.data_type == LITE_INT8);
        std::vector<int> input_shape = {1, 224, 224, 3};
        for (size_t i = 0; i < input_layout.ndim; i++) {
            ASSERT_TRUE(input_layout.shapes[i] == input_shape[i]);
        }
    }
    const char* keys[] = {"type", "fmt"};
    int info_size = SET_INFO_SIZE;
    int type = TENSOR_TYPE_UINT8;
    int fmt = TENSOR_FORMAT_NHWC;
    void* values[] = {static_cast<void*>(&type), static_cast<void*>(&fmt)};
    LITE_CAPI_CHECK(
            LITE_set_tensor_information(c_input_tensor, keys, values, info_size));
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            std::string("MobilenetV1/Predictions/Reshape_1"));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    {
        const char* keys[] = {"want_float"};
        uint8_t want_float = 1;
        void* values[] = {static_cast<void*>(&want_float)};
        LITE_CAPI_CHECK(LITE_set_tensor_information(c_output_tensor, keys, values, 1));
    }
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<uint32_t> result_class = {286, 464, 282, 357, 285};
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069};
        for (int i = 0; i < 5; i++) {
            ASSERT_TRUE(result_class[i] == MaxClass[i]);
            ASSERT_TRUE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    {
        float* true_data = static_cast<float*>(true_tensor->get_memory_ptr());
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* data1 = static_cast<float*>(output_ptr);
        size_t length = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        for (size_t i = 0; i < length; i++) {
            ASSERT_LT(std::abs(data1[i] - true_data[i]), 1e-3);
        }
    }
    //! forward the second input (dog) and make sure the result differs from the
    //! cat reference probabilities
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor_dog->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069};
        for (int i = 0; i < 5; i++) {
            ASSERT_FALSE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    LITE_destroy_network(c_network);
#undef SET_INFO_SIZE
#undef TENSOR_FORMAT_NHWC
#undef TENSOR_TYPE_UINT8
}
#endif
TEST(TestCapiNetWork, BasicResetOutput) {
    ForwardMgb;
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    SetInput;
    LiteLayout output_layout{{1, 1000}, 2, LiteDataType::LITE_FLOAT};
    std::shared_ptr<float> ptr(new float[1000], [](float* ptr) { delete[] ptr; });
    const char* output_name;
    LITE_CAPI_CHECK(LITE_get_output_name(c_network, 0, &output_name));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_name, LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor(c_output_tensor, output_layout, ptr.get()));
    ForwardNetwork;
    EXPECT_TRUE(lite::compare_memory<float>(
            ptr.get(), result_mgb->get_memory_ptr(),
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, BasicInplaceAndSingleThreadAffinity) {
    ForwardMgb;
    MakeNetwork;
    //! config the network with cpu inplace mode
    LITE_CAPI_CHECK(LITE_set_cpu_inplace_mode(c_network));
    LoadNetwork;
    //! set the single-thread affinity callback
    LITE_CAPI_CHECK(
            LITE_set_runtime_thread_affinity(c_network, single_thread_affinity));
    SetInput;
    ForwardNetwork;
    ASSERT_EQ(affinity_set, true);
    affinity_set = false;
    GetOutput;
    CompareResult;
    LITE_destroy_network(c_network);
}
TEST(TestCapiNetWork, UserAllocator) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_set_memory_allocator(c_network, allocate, free));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    ASSERT_GE(m_nr_allocated, 1);
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
    ASSERT_EQ(m_nr_left, 0);
}

TEST(TestCapiNetWork, BasicMultiThread) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_set_cpu_threads_number(c_network, NUMBER_THREDS));
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_runtime_thread_affinity(c_network, multi_thread_affinity));
    SetInput;
    ForwardNetwork;
    for (size_t i = 0; i < NUMBER_THREDS; i++) {
        for (size_t j = i + 1; j < NUMBER_THREDS; j++) {
            ASSERT_NE(thread_ids[i], thread_ids[j]);
        }
    }
    for (size_t i = 0; i < NUMBER_THREDS; i++) {
        thread_ids[i] = std::thread::id();
    }
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, DeviceIO) {
    ForwardMgb;
    LiteNetwork c_network;
    LiteIO input_io = default_io;
    input_io.is_host = true;
    input_io.name = "data";
    LiteNetworkIO network_io = *default_network_io();
    network_io.inputs = &input_io;
    network_io.input_size = 1;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, *default_config(), network_io));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, StartCallBack) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_start_callback(c_network, start_callback));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(start_checked);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, StartCallBackWithData) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_start_callback_with_userdata(
            c_network, start_callback_with_data, &user_data));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(start_checked_with_data);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, FinishCallBack) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_finish_callback(c_network, finish_callback));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(finish_checked);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, FinishCallBackWithData) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_finish_callback_with_userdata(
            c_network, finish_callback_with_data, &user_data));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(finish_checked_with_data);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, BasicCryptAes) {
    ForwardMgb;
    LiteConfig c_config = *default_config();
    c_config.bare_model_cryption_name = "AES_default";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    std::string model_crypt_path = "./shufflenet_crypt_aes.mge";
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_crypt_path.c_str()));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, PackedCryptRc4) {
    ForwardMgb;
    MakeNetwork;
    std::string model_crypt_path = "./test_packed_model_rc4.lite";
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_crypt_path.c_str()));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, AsyncExec) {
    finished = false;
    ForwardMgb;
    LiteNetwork c_network;
    LiteConfig c_config = *default_config();
    c_config.options.var_sanity_check_first_run = false;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    LITE_CAPI_CHECK(LITE_set_async_callback(c_network, async_callback));
    LoadNetwork;
    SetInput;
    LITE_forward(c_network);
    size_t count = 0;
    while (finished == false) {
        count++;
    }
    ASSERT_GT(count, 0);
    finished = false;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, AsyncExecWithData) {
    finished = false;
    ForwardMgb;
    LiteNetwork c_network;
    LiteConfig c_config = *default_config();
    c_config.options.var_sanity_check_first_run = false;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_async_callback_with_userdata(
            c_network, async_callback_with_data, &user_data));
    LoadNetwork;
    SetInput;
    LITE_forward(c_network);
    size_t count = 0;
    while (finished_with_data == false) {
        count++;
    }
    ASSERT_GT(count, 0);
    finished_with_data = false;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, OutputShapeOnly) {
    ForwardMgb;
    LiteNetwork c_network;
    LiteNetworkIO c_network_io = *default_network_io();
    LiteIO io_output = default_io;
    io_output.io_type = LiteIOType::LITE_IO_SHAPE;
    io_output.name = "TRUE_DIV(EXP[12065],reduce0[12067])[12077]";
    c_network_io.outputs = &io_output;
    c_network_io.output_size = 1;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, *default_config(), c_network_io));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    size_t length = 0;
    LITE_CAPI_CHECK(LITE_get_tensor_total_size_in_byte(c_output_tensor, &length));
    ASSERT_EQ(length / sizeof(float), 1000);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, ProfileIOdump) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_enable_profile_performance(c_network, "./profile.json"));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    ASSERT_TRUE(fopen("./profile.json", "r"));
    LITE_CAPI_CHECK(LITE_enable_io_txt_dump(c_network, "./io_txt_dump.txt"));
    ForwardNetwork;
    ASSERT_TRUE(fopen("./io_txt_dump.txt", "r"));
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, GlobalLayoutTransform) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_enable_global_layout_transform(c_network));
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_dump_layout_transform_model(
            c_network, "./shufflenet_after_trans.mge"));
    SetInput;
    ForwardNetwork;
    ASSERT_TRUE(fopen("./shufflenet_after_trans.mge", "r"));
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, GetDeviceType) {
    lite::Config config;
    auto lite_tensor = lite::get_input_data("./input_data.npy");
    std::string model_path = "./shufflenet.mge";
    MakeNetwork;
    LoadNetwork;
    LiteDeviceType devicetype;
    LITE_CAPI_CHECK(LITE_get_device_type(c_network, &devicetype));
    ASSERT_TRUE(devicetype == LiteDeviceType::LITE_CPU);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, GetModelExtraInfo) {
    lite::Config config;
    std::string model_path = "./track_640_320_pack_model_rc4_with_info.lite";
    MakeNetwork;
    LITE_load_model_from_path(c_network, model_path.c_str());
    const char* info = nullptr;
    int info_size = 0;
    LITE_CAPI_CHECK(LITE_get_model_extra_info(c_network, &info, &info_size));
    ASSERT_TRUE(info_size > 0);
    printf("info %s \n", info);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
TEST(TestCapiNetWork, TestWorkSpaceLimit) {
    lite::Config config;
    auto lite_tensor = lite::get_input_data("./input_data.npy");
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    std::string model_path = "./shufflenet.mge";
    MakeNetwork;
    LoadNetwork;
    printf("go to config workspace limit\n");
    LITE_CAPI_CHECK(LITE_set_network_algo_workspace_limit(c_network, 1000));
    SetInput;
    ForwardNetwork;
    GetOutput;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, TestShareWeights) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LiteNetwork c_network2;
    LITE_CAPI_CHECK(
            LITE_make_network(&c_network2, *default_config(), *default_network_io()));
    LITE_CAPI_CHECK(LITE_set_cpu_inplace_mode(c_network2));
    LITE_CAPI_CHECK(LITE_shared_weight_with_network(c_network2, c_network));
    int is_cpu_inplace_mode = false;
    LITE_CAPI_CHECK(LITE_is_cpu_inplace_mode(c_network2, &is_cpu_inplace_mode));
    ASSERT_EQ(is_cpu_inplace_mode, true);
    LiteTensor c_input_tensor2, c_output_tensor2;
    LITE_CAPI_CHECK(LITE_get_io_tensor(c_network2, "data", LITE_IO, &c_input_tensor2));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor2, lite_tensor->get_memory_ptr(),
            lite_tensor->get_tensor_total_size_in_byte()));
    LITE_CAPI_CHECK(LITE_forward(c_network2));
    LITE_CAPI_CHECK(LITE_wait(c_network2));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network2, output_name, LITE_IO, &c_output_tensor2));
    void* output_ptr2;
    LITE_CAPI_CHECK(LITE_get_tensor_memory(c_output_tensor2, &output_ptr2));
    EXPECT_TRUE(lite::compare_memory<float>(
            output_ptr2, result_mgb->get_memory_ptr(),
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network2));
}
TEST(TestCapiNetWork, GlobalHolder) {
    std::string model_path = "./shufflenet.mge";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(
            LITE_make_network(&c_network, *default_config(), *default_network_io()));
    auto destroy_network = c_network;
    LITE_CAPI_CHECK(
            LITE_make_network(&c_network, *default_config(), *default_network_io()));
    //! make sure destroy_network is destroyed by LITE_make_network
    LITE_destroy_network(destroy_network);
    ASSERT_EQ(LITE_destroy_network(destroy_network), 0);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}