/**
 * \file test/test_network_c.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "../src/misc.h"

#if LITE_BUILD_WITH_MGE

#include "../src/common.h"
#include "../src/mge/network_impl.h"

#include "../lite-c/src/common.h"
#include "lite-c/global_c.h"
#include "lite-c/network_c.h"
#include "lite-c/tensor_c.h"

#include "./test_common.h"
#include "megbrain/tensor.h"

#include <string.h>
#include <chrono>
#include <memory>
#include <random>
#include <unordered_map>
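
// Test-local helpers used by the C-API test cases below: thread-affinity
// hooks, a counting CPU memory allocator, and async/start/finish callbacks
// that validate the shufflenet "data" input and softmax output tensors.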
namespace {

int affinity_set = false;
int single_thread_affinity(int) {
    affinity_set = true;
    return 0;
}

std::atomic_size_t m_nr_left{0};
std::atomic_size_t m_nr_allocated{0};

void* allocate(LiteDeviceType device, int, size_t size, size_t align) {
    LITE_ASSERT(device == LiteDeviceType::LITE_CPU);
    m_nr_left++;
    m_nr_allocated++;
#ifdef WIN32
    return _aligned_malloc(size, align);
#elif defined(__ANDROID__) || defined(ANDROID)
    return memalign(align, size);
#else
    void* ptr = nullptr;
    auto err = posix_memalign(&ptr, align, size);
    mgb_assert(!err, "failed to malloc %zu bytes with align %zu", size, align);
    return ptr;
#endif
}

void free(LiteDeviceType device, int, void* ptr) {
    m_nr_left--;
    LITE_ASSERT(device == LiteDeviceType::LITE_CPU);
#ifdef WIN32
    _aligned_free(ptr);
#else
    ::free(ptr);
#endif
};

#define NUMBER_THREDS (4)
std::vector<std::thread::id> thread_ids(NUMBER_THREDS);
int multi_thread_affinity(int id) {
    thread_ids[id] = std::this_thread::get_id();
    return 0;
};

volatile bool finished = false;
int async_callback() {
    finished = true;
    return 0;
}

volatile bool finished_with_data = false;
int async_callback_with_data(void* user_data) {
    if (user_data != NULL) {
        std::cout << "async_callback user_data addr=" << std::hex << user_data
                  << std::endl;
    }
    finished_with_data = true;
    return 0;
}

volatile bool start_checked = false;
int start_callback(const LiteIO* inputs, const LiteTensor* input_tensors, size_t size) {
    start_checked = true;
    auto check_func = [&]() {
        ASSERT_EQ(size, 1);
        ASSERT_EQ(std::string(inputs->name), "data");
        LiteLayout layout;
        LITE_get_tensor_layout(*input_tensors, &layout);
        ASSERT_EQ(layout.ndim, 4);
        ASSERT_EQ(layout.shapes[1], 3);
        ASSERT_EQ(layout.shapes[2], 224);
        ASSERT_EQ(layout.shapes[3], 224);
    };
    check_func();
    return 0;
}

volatile bool start_checked_with_data = false;
int start_callback_with_data(
        const LiteIO* inputs, const LiteTensor* input_tensors, size_t size,
        void* user_data) {
    start_checked_with_data = true;
    auto check_func = [&]() {
        if (user_data != NULL) {
            std::cout << "start_callback user_data addr=" << std::hex << user_data
                      << std::endl;
        }
        ASSERT_EQ(size, 1);
        ASSERT_EQ(std::string(inputs->name), "data");
        LiteLayout layout;
        LITE_get_tensor_layout(*input_tensors, &layout);
        ASSERT_EQ(layout.ndim, 4);
        ASSERT_EQ(layout.shapes[1], 3);
        ASSERT_EQ(layout.shapes[2], 224);
        ASSERT_EQ(layout.shapes[3], 224);
    };
    check_func();
    return 0;
}

volatile bool finish_checked = false;
int finish_callback(
        const LiteIO* outputs, const LiteTensor* output_tensors, size_t size) {
    finish_checked = true;
    auto check_func = [&]() {
        ASSERT_EQ(size, 1);
        ASSERT_EQ(
                std::string(outputs->name),
                "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
        LiteLayout layout;
        LITE_get_tensor_layout(*output_tensors, &layout);
        ASSERT_EQ(layout.shapes[1], 1000);
    };
    check_func();
    return 0;
}

volatile bool finish_checked_with_data = false;
int finish_callback_with_data(
        const LiteIO* outputs, const LiteTensor* output_tensors, size_t size,
        void* user_data) {
    finish_checked_with_data = true;
    auto check_func = [&]() {
        if (user_data != NULL) {
            std::cout << "finish_callback user_data addr=" << std::hex << user_data
                      << std::endl;
        }
        ASSERT_EQ(size, 1);
        ASSERT_EQ(
                std::string(outputs->name),
                "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
        LiteLayout layout;
        LITE_get_tensor_layout(*output_tensors, &layout);
        ASSERT_EQ(layout.shapes[1], 1000);
    };
    check_func();
    return 0;
}

}  // namespace
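
// Shared test scaffolding: LITE_CAPI_CHECK turns a failing C-API call into an
// exception carrying LITE_get_last_error(), and the macros below factor out
// the common steps of each test case (compute a MegBrain reference result,
// build and load a Lite network, feed the input, run, fetch the output, and
// compare it against the reference).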
#define LITE_CAPI_CHECK(_expr)                 \
    do {                                       \
        int _ret = (_expr);                    \
        if (_ret) {                            \
            LITE_THROW(LITE_get_last_error()); \
        }                                      \
    } while (0)

#define ForwardMgb                                                             \
    lite::Config config;                                                       \
    auto lite_tensor = lite::get_input_data("./input_data.npy");               \
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte(); \
    std::string model_path = "./shufflenet.mge";                               \
    auto result_mgb = mgb_lar(model_path, config, "data", lite_tensor)

#define MakeNetwork                                                                  \
    LiteNetwork c_network;                                                           \
    LITE_CAPI_CHECK(                                                                 \
            LITE_make_network(&c_network, *default_config(), *default_network_io()))

#define LoadNetwork \
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_path.c_str()))

#define SetInput                                                                 \
    LiteTensor c_input_tensor, c_output_tensor;                                  \
    LITE_CAPI_CHECK(                                                             \
            LITE_get_io_tensor(c_network, "data", LITE_INPUT, &c_input_tensor)); \
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(                                    \
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte))

#define ForwardNetwork                        \
    LITE_CAPI_CHECK(LITE_forward(c_network)); \
    LITE_CAPI_CHECK(LITE_wait(c_network))

#define GetOutput                                                      \
    const char* output_name;                                           \
    LITE_CAPI_CHECK(LITE_get_output_name(c_network, 0, &output_name)); \
    LITE_CAPI_CHECK(LITE_get_io_tensor(                                \
            c_network, output_name, LITE_OUTPUT, &c_output_tensor));   \
    void* output_ptr;                                                  \
    LITE_CAPI_CHECK(LITE_get_tensor_memory(c_output_tensor, &output_ptr))

#define CompareResult                                 \
    EXPECT_TRUE(lite::compare_memory<float>(          \
            output_ptr, result_mgb->get_memory_ptr(), \
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)))
TEST(TestCapiNetWork, BasicResetInput) {
    ForwardMgb;
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_destroy_network(c_network);
}

TEST(TestCapiNetWork, GetAllName) {
    std::string model_path = "./shufflenet.mge";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    ASSERT_EQ(input_names.size(), 1);
    ASSERT_TRUE(std::string(input_names[0]) == "data");
    std::vector<const char*> output_names(output_size);
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            "TRUE_DIV(EXP[12065],reduce0[12067])[12077]");
    ASSERT_EQ(output_names.size(), 1);
    LITE_destroy_network(c_network);
}
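
// The two tests below target the RockChip NPU backend (LITE_RK_NPU) and are
// compiled only when LITE_BUILD_WITH_RKNPU is enabled. They load an .rknn
// MobileNet-V1 model, query and set per-tensor attributes ("type", "fmt",
// "want_float", ...), and check the top-5 classification result against
// reference data.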
#if LITE_BUILD_WITH_RKNPU

static int GetTop(
        float* pfProb, float* pfMaxProb, uint32_t* pMaxClass, uint32_t outputCount,
        uint32_t topNum) {
    uint32_t i, j;
#define MAX_TOP_NUM 20
    if (topNum > MAX_TOP_NUM)
        return 0;
    memset(pfMaxProb, 0, sizeof(float) * topNum);
    memset(pMaxClass, 0xff, sizeof(float) * topNum);
    for (j = 0; j < topNum; j++) {
        for (i = 0; i < outputCount; i++) {
            if ((i == *(pMaxClass + 0)) || (i == *(pMaxClass + 1)) ||
                (i == *(pMaxClass + 2)) || (i == *(pMaxClass + 3)) ||
                (i == *(pMaxClass + 4))) {
                continue;
            }
            if (pfProb[i] > *(pfMaxProb + j)) {
                *(pfMaxProb + j) = pfProb[i];
                *(pMaxClass + j) = i;
            }
        }
    }
    return 1;
}
TEST(TestCapiNetWork, rknntest_set_info) {
#define SET_INFO_SIZE 2
#define TENSOR_TYPE_UINT8 3
#define TENSOR_FORMAT_NHWC 1
    LiteConfig config;
    config.backend = LiteBackend::LITE_RK_NPU;
    config.device_type = LiteDeviceType::LITE_NPU;
    config.bare_model_cryption_name = nullptr;
    auto lite_tensor = lite::get_input_data("./model/cat_224x224.npy");
    auto true_tensor = lite::get_input_data("./output_data.npy");
    auto rknn_model = "./model/mobilenet_v1.rknn";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network_config(&c_network, config));
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, rknn_model));
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    std::vector<const char*> output_names(output_size);
    LiteTensor c_input_tensor, c_output_tensor;
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, input_names[0], LITE_IO, &c_input_tensor));
    size_t input_length = 0;
    LITE_get_tensor_total_size_in_byte(c_input_tensor, &input_length);
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    {
        LiteLayout input_layout;
        LITE_get_tensor_layout(c_input_tensor, &input_layout);
        ASSERT_TRUE(input_layout.data_type == LITE_INT8);
        std::vector<int> input_shape = {1, 224, 224, 3};
        for (size_t i = 0; i < input_layout.ndim; i++) {
            ASSERT_TRUE(input_layout.shapes[i] == input_shape[i]);
        }
    }
    {
        int size_attr = 0;
        LITE_CAPI_CHECK(LITE_get_tensor_attribute(
                c_input_tensor, nullptr, nullptr, &size_attr));
        ASSERT_TRUE(size_attr > 0);
        const char* keys[size_attr];
        void* values[size_attr];
        LITE_CAPI_CHECK(
                LITE_get_tensor_attribute(c_input_tensor, keys, values, &size_attr));
        ASSERT_TRUE(size_attr > 5);
        std::unordered_map<std::string, uint32_t> result_map = {
                {"zp", 0},          {"index", 0},       {"size_with_stride", 150528},
                {"stride", 224},    {"n_size", 150528}, {"n_elems", 150528},
                {"qnt_type", 2},    {"n_dims", 4},      {"type", 2},
                {"fmt", 1},         {"dims0", 1},       {"dims1", 224},
                {"dims2", 224},     {"dims3", 3},
        };
        for (int i = 0; i < size_attr; i++) {
            std::string key(keys[i]);
            if (key == "names") {
                ASSERT_TRUE(
                        std::string("input") ==
                        std::string(static_cast<const char*>(values[i])));
            } else if (key == "scale") {
                float scale = *static_cast<float*>(values[i]);
                ASSERT_TRUE(std::fabs(scale - 0.007812) < 0.00001);
            } else if (key == "fl" || key == "pass_through") {
                uint8_t val = *static_cast<uint8_t*>(values[i]);
                if (key == "fl") {
                    ASSERT_TRUE(val == 0);
                } else {
                    ASSERT_TRUE(val == 1);
                }
            } else {
                uint32_t val = *static_cast<uint32_t*>(values[i]);
                ASSERT_TRUE(result_map[std::string(keys[i])] == val);
            }
        }
    }
    const char* keys[] = {"type", "fmt"};
    int info_size = SET_INFO_SIZE;
    int type = TENSOR_TYPE_UINT8;
    int fmt = TENSOR_FORMAT_NHWC;
    void* values[] = {static_cast<void*>(&type), static_cast<void*>(&fmt)};
    LITE_CAPI_CHECK(
            LITE_set_tensor_information(c_input_tensor, keys, values, info_size));
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            std::string("MobilenetV1/Predictions/Reshape_1"));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    // LiteLayout tmp_output_layout;
    // LITE_get_tensor_layout(c_output_tensor, &tmp_output_layout);
    // tmp_output_layout.data_type = LiteDataType::LITE_FLOAT;
    // LITE_set_tensor_layout(c_output_tensor, tmp_output_layout);
    {
        const char* keys[] = {"want_float"};
        uint8_t want_float = 1;
        void* values[] = {static_cast<void*>(&want_float)};
        LITE_CAPI_CHECK(LITE_set_tensor_information(c_output_tensor, keys, values, 1));
    }
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        LiteLayout output_layout;
        LITE_get_tensor_layout(c_output_tensor, &output_layout);
        ASSERT_TRUE(output_layout.data_type == LITE_FLOAT);
        int size_attr = 0;
        LITE_CAPI_CHECK(LITE_get_tensor_attribute(
                c_output_tensor, nullptr, nullptr, &size_attr));
        ASSERT_TRUE(size_attr > 0);
        const char* keys[size_attr];
        void* values[size_attr];
        LITE_CAPI_CHECK(
                LITE_get_tensor_attribute(c_output_tensor, keys, values, &size_attr));
        ASSERT_TRUE(size_attr > 5);
        std::unordered_map<std::string, uint32_t> result_map = {
                {"zp", 0},          {"index", 0},     {"size_with_stride", 2002},
                {"stride", 0},      {"n_size", 2002}, {"n_elems", 1001},
                {"qnt_type", 2},    {"n_dims", 2},    {"type", 0},
                {"fmt", 2},         {"dims0", 1},     {"dims1", 1001},
        };
        for (int i = 0; i < size_attr; i++) {
            std::string key(keys[i]);
            if (key == "names") {
                ASSERT_TRUE(
                        "MobilenetV1/Predictions/Reshape_1" ==
                        std::string(static_cast<const char*>(values[i])));
            } else if (key == "scale") {
                float scale = *static_cast<float*>(values[i]);
                ASSERT_TRUE(std::fabs(scale - 1.0) < 0.00001);
            } else if (key == "fl" || key == "pass_through") {
                uint8_t val = *static_cast<uint8_t*>(values[i]);
                ASSERT_TRUE(val == 0);
            } else {
                uint32_t val = *static_cast<uint32_t*>(values[i]);
                ASSERT_TRUE(result_map[std::string(keys[i])] == val);
            }
        }
    }
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<uint32_t> result_class = {
                286, 464, 282, 357, 285,
        };
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069,
        };
        for (int i = 0; i < 5; i++) {
            ASSERT_TRUE(result_class[i] == MaxClass[i]);
            ASSERT_TRUE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    {
        float* true_data = static_cast<float*>(true_tensor->get_memory_ptr());
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* data1 = static_cast<float*>(output_ptr);
        size_t length = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        for (size_t i = 0; i < length; i++) {
            ASSERT_LT(std::abs(data1[i] - true_data[i]), 1e-3);
        }
    }
    LITE_destroy_network(c_network);
#undef SET_INFO_SIZE
#undef TENSOR_FORMAT_NHWC
#undef TENSOR_TYPE_UINT8
}
TEST(TestCapiNetWork, rknntest_set_info_two_input) {
#define SET_INFO_SIZE 2
#define TENSOR_TYPE_UINT8 3
#define TENSOR_FORMAT_NHWC 1
    LiteConfig config;
    config.backend = LiteBackend::LITE_RK_NPU;
    config.device_type = LiteDeviceType::LITE_NPU;
    config.bare_model_cryption_name = nullptr;
    auto lite_tensor = lite::get_input_data("./model/cat_224x224.npy");
    auto lite_tensor_dog = lite::get_input_data("./model/dog_224x224.npy");
    auto true_tensor = lite::get_input_data("./output_data.npy");
    auto rknn_model = "./model/mobilenet_v1.rknn";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network_config(&c_network, config));
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, rknn_model));
    size_t input_size, output_size;
    LITE_get_all_input_name(c_network, &input_size, nullptr);
    LITE_get_all_output_name(c_network, &output_size, nullptr);
    std::vector<const char*> input_names(input_size);
    std::vector<const char*> output_names(output_size);
    LiteTensor c_input_tensor, c_output_tensor;
    LITE_get_all_input_name(c_network, nullptr, input_names.data());
    LITE_get_all_output_name(c_network, nullptr, output_names.data());
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, input_names[0], LITE_IO, &c_input_tensor));
    size_t input_length = 0;
    LITE_get_tensor_total_size_in_byte(c_input_tensor, &input_length);
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    {
        LiteLayout input_layout;
        LITE_get_tensor_layout(c_input_tensor, &input_layout);
        ASSERT_TRUE(input_layout.data_type == LITE_INT8);
        std::vector<int> input_shape = {1, 224, 224, 3};
        for (size_t i = 0; i < input_layout.ndim; i++) {
            ASSERT_TRUE(input_layout.shapes[i] == input_shape[i]);
        }
    }
    const char* keys[] = {"type", "fmt"};
    int info_size = SET_INFO_SIZE;
    int type = TENSOR_TYPE_UINT8;
    int fmt = TENSOR_FORMAT_NHWC;
    void* values[] = {static_cast<void*>(&type), static_cast<void*>(&fmt)};
    LITE_CAPI_CHECK(
            LITE_set_tensor_information(c_input_tensor, keys, values, info_size));
    ASSERT_TRUE(
            std::string(output_names[0]) ==
            std::string("MobilenetV1/Predictions/Reshape_1"));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_names[0], LITE_IO, &c_output_tensor));
    {
        const char* keys[] = {"want_float"};
        uint8_t want_float = 1;
        void* values[] = {static_cast<void*>(&want_float)};
        LITE_CAPI_CHECK(LITE_set_tensor_information(c_output_tensor, keys, values, 1));
    }
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<uint32_t> result_class = {
                286, 464, 282, 357, 285,
        };
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069,
        };
        for (int i = 0; i < 5; i++) {
            ASSERT_TRUE(result_class[i] == MaxClass[i]);
            ASSERT_TRUE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    {
        float* true_data = static_cast<float*>(true_tensor->get_memory_ptr());
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* data1 = static_cast<float*>(output_ptr);
        size_t length = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        for (size_t i = 0; i < length; i++) {
            ASSERT_LT(std::abs(data1[i] - true_data[i]), 1e-3);
        }
    }
    // second run with the dog image: the top-5 probabilities must now differ
    // from the cat reference
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor, lite_tensor_dog->get_memory_ptr(), data_length_in_byte));
    LITE_CAPI_CHECK(LITE_forward(c_network));
    LITE_CAPI_CHECK(LITE_wait(c_network));
    ASSERT_TRUE(std::string(output_names[0]) == "MobilenetV1/Predictions/Reshape_1");
    ASSERT_EQ(output_names.size(), 1);
    {
        uint32_t MaxClass[5];
        float fMaxProb[5];
        void* output_ptr;
        LITE_get_tensor_memory(c_output_tensor, &output_ptr);
        float* buffer = (float*)output_ptr;
        uint32_t sz = true_tensor->get_tensor_total_size_in_byte() / sizeof(float);
        GetTop(buffer, fMaxProb, MaxClass, sz, 5);
        std::vector<float> result_prob = {
                0.407227, 0.365723, 0.090454, 0.018051, 0.013069,
        };
        for (int i = 0; i < 5; i++) {
            ASSERT_FALSE(std::fabs(result_prob[i] - fMaxProb[i]) < 0.0001);
        }
    }
    LITE_destroy_network(c_network);
#undef SET_INFO_SIZE
#undef TENSOR_FORMAT_NHWC
#undef TENSOR_TYPE_UINT8
}

#endif
TEST(TestCapiNetWork, BasicResetOutput) {
    ForwardMgb;
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_default_network(&c_network));
    LoadNetwork;
    SetInput;
    LiteLayout output_layout{{1, 1000}, 2, LiteDataType::LITE_FLOAT};
    std::shared_ptr<float> ptr(new float[1000], [](float* ptr) { delete[] ptr; });
    const char* output_name;
    LITE_CAPI_CHECK(LITE_get_output_name(c_network, 0, &output_name));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network, output_name, LITE_IO, &c_output_tensor));
    LITE_CAPI_CHECK(LITE_reset_tensor(c_output_tensor, output_layout, ptr.get()));
    ForwardNetwork;
    EXPECT_TRUE(lite::compare_memory<float>(
            ptr.get(), result_mgb->get_memory_ptr(),
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, BasicInplaceAndSingleThreadAffinity) {
    ForwardMgb;
    MakeNetwork;
    //! config the network with cpu inplace mode
    LITE_CAPI_CHECK(LITE_set_cpu_inplace_mode(c_network));
    LoadNetwork;
    //! set the single-thread affinity callback
    LITE_CAPI_CHECK(
            LITE_set_runtime_thread_affinity(c_network, single_thread_affinity));
    SetInput;
    ForwardNetwork;
    ASSERT_EQ(affinity_set, true);
    affinity_set = false;
    GetOutput;
    CompareResult;
    LITE_destroy_network(c_network);
}

TEST(TestCapiNetWork, UserAllocator) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_set_memory_allocator(c_network, allocate, free));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    ASSERT_GE(m_nr_allocated, 1);
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
    ASSERT_EQ(m_nr_left, 0);
}
TEST(TestCapiNetWork, BasicMultiThread) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_set_cpu_threads_number(c_network, NUMBER_THREDS));
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_runtime_thread_affinity(c_network, multi_thread_affinity));
    SetInput;
    ForwardNetwork;
    for (size_t i = 0; i < NUMBER_THREDS; i++) {
        for (size_t j = i + 1; j < NUMBER_THREDS; j++) {
            ASSERT_NE(thread_ids[i], thread_ids[j]);
        }
    }
    for (size_t i = 0; i < NUMBER_THREDS; i++) {
        thread_ids[i] = std::thread::id();
    }
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, DeviceIO) {
    ForwardMgb;
    LiteNetwork c_network;
    LiteIO input_io = default_io;
    input_io.is_host = true;
    input_io.name = "data";
    LiteNetworkIO network_io = *default_network_io();
    network_io.inputs = &input_io;
    network_io.input_size = 1;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, *default_config(), network_io));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, StartCallBack) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_start_callback(c_network, start_callback));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(start_checked);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, StartCallBackWithData) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_start_callback_with_userdata(
            c_network, start_callback_with_data, &user_data));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(start_checked_with_data);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, FinishCallBack) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    LITE_CAPI_CHECK(LITE_set_finish_callback(c_network, finish_callback));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(finish_checked);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, FinishCallBackWithData) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_finish_callback_with_userdata(
            c_network, finish_callback_with_data, &user_data));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    ASSERT_TRUE(finish_checked_with_data);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, BasicCryptAes) {
    ForwardMgb;
    LiteConfig c_config = *default_config();
    c_config.bare_model_cryption_name = "AES_default";
    LiteNetwork c_network;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    std::string model_crypt_path = "./shufflenet_crypt_aes.mge";
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_crypt_path.c_str()));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, PackedCryptRc4) {
    ForwardMgb;
    MakeNetwork;
    std::string model_crypt_path = "./test_packed_model_rc4.lite";
    LITE_CAPI_CHECK(LITE_load_model_from_path(c_network, model_crypt_path.c_str()));
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}
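
// The async tests below disable var_sanity_check_first_run, register an async
// callback, then spin on the volatile `finished` / `finished_with_data` flags
// until the callback marks the forward pass as done.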
TEST(TestCapiNetWork, AsyncExec) {
    finished = false;
    ForwardMgb;
    LiteNetwork c_network;
    LiteConfig c_config = *default_config();
    c_config.options.var_sanity_check_first_run = false;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    LITE_CAPI_CHECK(LITE_set_async_callback(c_network, async_callback));
    LoadNetwork;
    SetInput;
    LITE_forward(c_network);
    size_t count = 0;
    while (finished == false) {
        count++;
    }
    ASSERT_GT(count, 0);
    finished = false;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, AsyncExecWithData) {
    finished = false;
    ForwardMgb;
    LiteNetwork c_network;
    LiteConfig c_config = *default_config();
    c_config.options.var_sanity_check_first_run = false;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, c_config, *default_network_io()));
    size_t user_data = 1;
    LITE_CAPI_CHECK(LITE_set_async_callback_with_userdata(
            c_network, async_callback_with_data, &user_data));
    LoadNetwork;
    SetInput;
    LITE_forward(c_network);
    size_t count = 0;
    while (finished_with_data == false) {
        count++;
    }
    ASSERT_GT(count, 0);
    finished_with_data = false;
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, OutputShapeOnly) {
    ForwardMgb;
    LiteNetwork c_network;
    LiteNetworkIO c_network_io = *default_network_io();
    LiteIO io_output = default_io;
    io_output.io_type = LiteIOType::LITE_IO_SHAPE;
    io_output.name = "TRUE_DIV(EXP[12065],reduce0[12067])[12077]";
    c_network_io.outputs = &io_output;
    c_network_io.output_size = 1;
    LITE_CAPI_CHECK(LITE_make_network(&c_network, *default_config(), c_network_io));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    size_t length = 0;
    LITE_CAPI_CHECK(LITE_get_tensor_total_size_in_byte(c_output_tensor, &length));
    ASSERT_EQ(length / sizeof(float), 1000);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, ProfileIOdump) {
    ForwardMgb;
    MakeNetwork;
    LITE_CAPI_CHECK(LITE_enable_profile_performance(c_network, "./profile.json"));
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    ASSERT_TRUE(fopen("./profile.json", "r"));
    LITE_CAPI_CHECK(LITE_enable_io_txt_dump(c_network, "./io_txt_dump.txt"));
    ForwardNetwork;
    ASSERT_TRUE(fopen("./io_txt_dump.txt", "r"));
    GetOutput;
    CompareResult;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, GetDeviceType) {
    lite::Config config;
    auto lite_tensor = lite::get_input_data("./input_data.npy");
    std::string model_path = "./shufflenet.mge";
    MakeNetwork;
    LoadNetwork;
    LiteDeviceType devicetype;
    LITE_CAPI_CHECK(LITE_get_device_type(c_network, &devicetype));
    ASSERT_TRUE(devicetype == LiteDeviceType::LITE_CPU);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, GetModelExtraInfo) {
    lite::Config config;
    std::string model_path = "./track_640_320_pack_model_rc4_with_info.lite";
    MakeNetwork;
    LITE_load_model_from_path(c_network, model_path.c_str());
    const char* info = nullptr;
    int info_size = 0;
    LITE_CAPI_CHECK(LITE_get_model_extra_info(c_network, &info, &info_size));
    ASSERT_TRUE(info_size > 0);
    printf("info %s \n", info);
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, TestWorkSpaceLimit) {
    lite::Config config;
    auto lite_tensor = lite::get_input_data("./input_data.npy");
    size_t data_length_in_byte = lite_tensor->get_tensor_total_size_in_byte();
    std::string model_path = "./shufflenet.mge";
    MakeNetwork;
    LoadNetwork;
    printf("go to config workspace limit\n");
    LITE_CAPI_CHECK(LITE_set_network_algo_workspace_limit(c_network, 1000));
    SetInput;
    ForwardNetwork;
    GetOutput;
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
}

TEST(TestCapiNetWork, TestShareWeights) {
    ForwardMgb;
    MakeNetwork;
    LoadNetwork;
    SetInput;
    ForwardNetwork;
    GetOutput;
    CompareResult;
    LiteNetwork c_network2;
    LITE_CAPI_CHECK(
            LITE_make_network(&c_network2, *default_config(), *default_network_io()));
    LITE_CAPI_CHECK(LITE_set_cpu_inplace_mode(c_network2));
    LITE_CAPI_CHECK(LITE_shared_weight_with_network(c_network2, c_network));
    int is_cpu_inplace_mode = false;
    LITE_CAPI_CHECK(LITE_is_cpu_inplace_mode(c_network2, &is_cpu_inplace_mode));
    ASSERT_EQ(is_cpu_inplace_mode, true);
    LiteTensor c_input_tensor2, c_output_tensor2;
    LITE_CAPI_CHECK(LITE_get_io_tensor(c_network2, "data", LITE_IO, &c_input_tensor2));
    LITE_CAPI_CHECK(LITE_reset_tensor_memory(
            c_input_tensor2, lite_tensor->get_memory_ptr(),
            lite_tensor->get_tensor_total_size_in_byte()));
    LITE_CAPI_CHECK(LITE_forward(c_network2));
    LITE_CAPI_CHECK(LITE_wait(c_network2));
    LITE_CAPI_CHECK(
            LITE_get_io_tensor(c_network2, output_name, LITE_IO, &c_output_tensor2));
    void* output_ptr2;
    LITE_CAPI_CHECK(LITE_get_tensor_memory(c_output_tensor2, &output_ptr2));
    EXPECT_TRUE(lite::compare_memory<float>(
            output_ptr2, result_mgb->get_memory_ptr(),
            result_mgb->get_tensor_total_size_in_byte() / sizeof(float)));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network));
    LITE_CAPI_CHECK(LITE_destroy_network(c_network2));
}

#endif

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}