You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

test_tensor_c.cpp 12 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332
  1. /**
  2. * \file test/test_tensor_c.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "lite_build_config.h"
  12. #if LITE_BUILD_WITH_MGE
  13. #include "../src/misc.h"
  14. #include "lite-c/global_c.h"
  15. #include "lite-c/tensor_c.h"
  16. #include <gtest/gtest.h>
  17. #include <memory>
  18. #include <thread>
  19. TEST(TestCapiTensor, Basic) {
  20. LiteTensor c_tensor0, c_tensor1;
  21. LiteTensorDesc description = default_desc;
  22. LITE_make_tensor(description, &c_tensor0);
  23. int is_pinned_host = false;
  24. LITE_is_pinned_host(c_tensor0, &is_pinned_host);
  25. ASSERT_FALSE(is_pinned_host);
  26. LiteDeviceType device_type;
  27. LITE_get_tensor_device_type(c_tensor0, &device_type);
  28. ASSERT_EQ(device_type, LiteDeviceType::LITE_CPU);
  29. size_t length = 0;
  30. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  31. ASSERT_EQ(length, 0);
  32. LiteLayout layout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  33. description.device_type = LiteDeviceType::LITE_CPU;
  34. description.layout = layout;
  35. description.is_pinned_host = true;
  36. LITE_make_tensor(description, &c_tensor1);
  37. LITE_is_pinned_host(c_tensor1, &is_pinned_host);
  38. ASSERT_TRUE(is_pinned_host);
  39. LITE_get_tensor_total_size_in_byte(c_tensor1, &length);
  40. ASSERT_EQ(length, 1 * 3 * 224 * 224 * 4);
  41. LiteLayout get_layout;
  42. LITE_get_tensor_layout(c_tensor1, &get_layout);
  43. ASSERT_EQ(get_layout.ndim, layout.ndim);
  44. ASSERT_EQ(get_layout.data_type, layout.data_type);
  45. ASSERT_EQ(get_layout.shapes[0], layout.shapes[0]);
  46. ASSERT_EQ(get_layout.shapes[1], layout.shapes[1]);
  47. ASSERT_EQ(get_layout.shapes[2], layout.shapes[2]);
  48. ASSERT_EQ(get_layout.shapes[3], layout.shapes[3]);
  49. //! test error
  50. ASSERT_EQ(LITE_is_pinned_host(c_tensor0, nullptr), -1);
  51. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  52. ASSERT_EQ(LITE_get_last_error_code(), ErrorCode::LITE_INTERNAL_ERROR);
  53. printf("The last error is: %s\n", LITE_get_last_error());
  54. LITE_clear_last_error();
  55. ASSERT_EQ(strlen(LITE_get_last_error()), 0);
  56. ASSERT_EQ(LITE_get_last_error_code(), ErrorCode::OK);
  57. LITE_destroy_tensor(c_tensor0);
  58. LITE_destroy_tensor(c_tensor1);
  59. }
  60. TEST(TestCapiTensor, SetLayoutReAlloc) {
  61. LiteTensor c_tensor0;
  62. LiteTensorDesc description = default_desc;
  63. description.layout = LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  64. LITE_make_tensor(description, &c_tensor0);
  65. void *old_ptr, *new_ptr;
  66. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  67. LiteLayout new_layout = LiteLayout{{1, 3, 100, 100}, 4, LiteDataType::LITE_INT8};
  68. LITE_set_tensor_layout(c_tensor0, new_layout);
  69. LITE_get_tensor_memory(c_tensor0, &new_ptr);
  70. size_t length = 0;
  71. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  72. ASSERT_EQ(length, 1 * 3 * 100 * 100);
  73. ASSERT_EQ(old_ptr, new_ptr);
  74. }
  75. TEST(TestCapiTensor, Reset) {
  76. LiteTensor c_tensor0, c_tensor1;
  77. LiteTensorDesc description = default_desc;
  78. description.layout = LiteLayout{{3, 20}, 2, LiteDataType::LITE_FLOAT};
  79. LITE_make_tensor(description, &c_tensor0);
  80. LITE_make_tensor(description, &c_tensor1);
  81. void *old_ptr0, *old_ptr1;
  82. LITE_get_tensor_memory(c_tensor0, &old_ptr0);
  83. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  84. //! make sure memory is allocted
  85. ASSERT_NO_THROW(memcpy(old_ptr0, old_ptr1, 3 * 20 * 4));
  86. std::shared_ptr<float> new_ptr0(
  87. new float[3 * 20], [](float* ptr) { delete[] ptr; });
  88. std::shared_ptr<float> new_ptr1(
  89. new float[3 * 20], [](float* ptr) { delete[] ptr; });
  90. LITE_reset_tensor_memory(c_tensor0, new_ptr0.get(), 3 * 20 * 4);
  91. LITE_reset_tensor_memory(c_tensor1, new_ptr1.get(), 3 * 20 * 4);
  92. void *tmp_ptr0, *tmp_ptr1;
  93. LITE_get_tensor_memory(c_tensor0, &tmp_ptr0);
  94. LITE_get_tensor_memory(c_tensor1, &tmp_ptr1);
  95. ASSERT_EQ(tmp_ptr0, new_ptr0.get());
  96. ASSERT_EQ(tmp_ptr1, new_ptr1.get());
  97. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  98. LiteLayout layout1{{6, 20}, 2, LiteDataType::LITE_FLOAT};
  99. std::shared_ptr<float> ptr2(new float[6 * 20], [](float* ptr) { delete[] ptr; });
  100. std::shared_ptr<float> ptr3(new float[6 * 20], [](float* ptr) { delete[] ptr; });
  101. LITE_reset_tensor(c_tensor0, layout1, new_ptr0.get());
  102. LITE_reset_tensor(c_tensor1, layout1, new_ptr1.get());
  103. //! memory is not freed by Tensor reset
  104. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  105. LiteLayout tmp_layout0, tmp_layout1;
  106. LITE_get_tensor_layout(c_tensor0, &tmp_layout0);
  107. LITE_get_tensor_layout(c_tensor1, &tmp_layout1);
  108. ASSERT_EQ(tmp_layout0.ndim, tmp_layout1.ndim);
  109. ASSERT_EQ(tmp_layout0.data_type, tmp_layout1.data_type);
  110. ASSERT_EQ(tmp_layout0.shapes[0], tmp_layout1.shapes[0]);
  111. ASSERT_EQ(tmp_layout0.shapes[1], tmp_layout1.shapes[1]);
  112. LITE_destroy_tensor(c_tensor0);
  113. LITE_destroy_tensor(c_tensor1);
  114. }
  115. TEST(TestCapiTensor, CrossCNCopy) {
  116. LiteTensor c_tensor0, c_tensor1, c_tensor2;
  117. LiteTensorDesc description = default_desc;
  118. LITE_make_tensor(description, &c_tensor0);
  119. description.layout = LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  120. LITE_make_tensor(description, &c_tensor1);
  121. LITE_make_tensor(description, &c_tensor2);
  122. LITE_tensor_copy(c_tensor1, c_tensor2);
  123. LITE_tensor_copy(c_tensor2, c_tensor1);
  124. void *old_ptr1, *old_ptr2, *new_ptr1, *new_ptr2;
  125. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  126. LITE_get_tensor_memory(c_tensor2, &old_ptr2);
  127. //! test source tenor is empty
  128. ASSERT_EQ(LITE_tensor_copy(c_tensor1, c_tensor0), -1);
  129. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  130. printf("The last error is: %s\n", LITE_get_last_error());
  131. LITE_tensor_copy(c_tensor0, c_tensor1);
  132. LITE_tensor_copy(c_tensor1, c_tensor2);
  133. LITE_tensor_copy(c_tensor2, c_tensor0);
  134. LITE_get_tensor_memory(c_tensor1, &new_ptr1);
  135. LITE_get_tensor_memory(c_tensor2, &new_ptr2);
  136. ASSERT_EQ(old_ptr1, new_ptr1);
  137. ASSERT_EQ(old_ptr2, new_ptr2);
  138. LITE_destroy_tensor(c_tensor0);
  139. LITE_destroy_tensor(c_tensor1);
  140. LITE_destroy_tensor(c_tensor2);
  141. }
  142. TEST(TestCapiTensor, ShareMemoryWith) {
  143. LiteTensor c_tensor0, c_tensor1;
  144. LiteTensorDesc description = default_desc;
  145. LITE_make_tensor(description, &c_tensor0);
  146. description.layout = LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  147. LITE_make_tensor(description, &c_tensor1);
  148. ASSERT_EQ(LITE_tensor_share_memory_with(c_tensor1, c_tensor0), -1);
  149. LITE_tensor_share_memory_with(c_tensor0, c_tensor1);
  150. void *ptr0, *ptr1;
  151. LITE_get_tensor_memory(c_tensor0, &ptr0);
  152. LITE_get_tensor_memory(c_tensor1, &ptr1);
  153. ASSERT_EQ(ptr0, ptr1);
  154. LITE_destroy_tensor(c_tensor0);
  155. LITE_destroy_tensor(c_tensor1);
  156. }
  157. TEST(TestCapiTensor, Reshape) {
  158. LiteTensor c_tensor0;
  159. LiteTensorDesc description = default_desc;
  160. description.layout = LiteLayout{{8, 8, 100, 100}, 4, LiteDataType::LITE_FLOAT};
  161. LITE_make_tensor(description, &c_tensor0);
  162. void* old_ptr;
  163. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  164. auto check = [&](std::vector<size_t> expect, const LiteTensor& tensor) {
  165. LiteLayout get_layout;
  166. LITE_get_tensor_layout(tensor, &get_layout);
  167. ASSERT_EQ(get_layout.ndim, expect.size());
  168. for (size_t i = 0; i < expect.size(); i++) {
  169. ASSERT_EQ(get_layout.shapes[i], expect[i]);
  170. }
  171. void* new_ptr;
  172. LITE_get_tensor_memory(tensor, &new_ptr);
  173. ASSERT_EQ(old_ptr, new_ptr);
  174. };
  175. {
  176. int shape[2] = {-1, 50};
  177. LITE_tensor_reshape(c_tensor0, shape, 2);
  178. check({8 * 8 * 100 * 2, 50}, c_tensor0);
  179. }
  180. {
  181. int shape[3] = {64, 100, 100};
  182. LITE_tensor_reshape(c_tensor0, shape, 3);
  183. check({8 * 8, 100, 100}, c_tensor0);
  184. }
  185. {
  186. int shape[3] = {16, 100, -1};
  187. LITE_tensor_reshape(c_tensor0, shape, 3);
  188. check({16, 100, 400}, c_tensor0);
  189. }
  190. LITE_destroy_tensor(c_tensor0);
  191. }
  192. TEST(TestCapiTensor, Slice) {
  193. LiteTensor c_tensor0;
  194. LiteTensorDesc description = default_desc;
  195. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  196. LITE_make_tensor(description, &c_tensor0);
  197. void* old_ptr;
  198. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  199. for (size_t i = 0; i < 20 * 20; i++) {
  200. *(static_cast<float*>(old_ptr) + i) = i;
  201. }
  202. auto check = [&](size_t start, size_t end, size_t step, bool have_step) {
  203. LiteTensor tensor, slice_tensor;
  204. LITE_make_tensor(default_desc, &tensor);
  205. size_t start_ptr[2] = {start, start};
  206. size_t end_ptr[2] = {end, end};
  207. size_t step_ptr[2] = {step, step};
  208. if (have_step) {
  209. LITE_tensor_slice(
  210. c_tensor0, start_ptr, end_ptr, step_ptr, 2, &slice_tensor);
  211. } else {
  212. LITE_tensor_slice(c_tensor0, start_ptr, end_ptr, nullptr, 2, &slice_tensor);
  213. }
  214. int is_continue = true;
  215. LITE_is_memory_continue(slice_tensor, &is_continue);
  216. ASSERT_FALSE(is_continue);
  217. LITE_tensor_copy(tensor, slice_tensor);
  218. void* new_ptr;
  219. LITE_get_tensor_memory(tensor, &new_ptr);
  220. float* ptr = static_cast<float*>(new_ptr);
  221. for (size_t i = start; i < end; i += step) {
  222. for (size_t j = start; j < end; j += step) {
  223. ASSERT_EQ(float(i * 20 + j), *ptr);
  224. ++ptr;
  225. }
  226. }
  227. LITE_destroy_tensor(tensor);
  228. };
  229. check(1, 8, 1, true);
  230. check(1, 8, 1, false);
  231. check(2, 10, 2, true);
  232. check(10, 18, 4, true);
  233. check(10, 18, 1, false);
  234. LITE_destroy_tensor(c_tensor0);
  235. }
  236. TEST(TestCapiTensor, Memset) {
  237. LiteTensor c_tensor0;
  238. LiteTensorDesc description = default_desc;
  239. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  240. LITE_make_tensor(description, &c_tensor0);
  241. void* ptr;
  242. uint8_t* uint8_ptr;
  243. LITE_get_tensor_memory(c_tensor0, &ptr);
  244. LITE_tensor_fill_zero(c_tensor0);
  245. uint8_ptr = static_cast<uint8_t*>(ptr);
  246. for (size_t i = 0; i < 20 * 20; i++) {
  247. ASSERT_EQ(0, *uint8_ptr);
  248. uint8_ptr++;
  249. }
  250. LITE_destroy_tensor(c_tensor0);
  251. }
  252. TEST(TestCapiTensor, GetMemoryByIndex) {
  253. LiteTensor c_tensor0;
  254. LiteTensorDesc description = default_desc;
  255. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  256. LITE_make_tensor(description, &c_tensor0);
  257. void *ptr0, *ptr1, *ptr2, *ptr3;
  258. LITE_get_tensor_memory(c_tensor0, &ptr0);
  259. size_t index0[] = {3, 4};
  260. LITE_get_tensor_memory_with_index(c_tensor0, &index0[0], 2, &ptr1);
  261. size_t index1[] = {5, 7};
  262. LITE_get_tensor_memory_with_index(c_tensor0, &index1[0], 2, &ptr2);
  263. size_t index2[] = {5};
  264. LITE_get_tensor_memory_with_index(c_tensor0, &index2[0], 1, &ptr3);
  265. ASSERT_EQ(ptr1, static_cast<float*>(ptr0) + 3 * 20 + 4);
  266. ASSERT_EQ(ptr2, static_cast<float*>(ptr0) + 5 * 20 + 7);
  267. ASSERT_EQ(ptr3, static_cast<float*>(ptr0) + 5 * 20);
  268. LITE_destroy_tensor(c_tensor0);
  269. }
  270. TEST(TestCapiTensor, ThreadLocalError) {
  271. LiteTensor c_tensor0;
  272. LiteTensorDesc description = default_desc;
  273. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  274. void *ptr0, *ptr1;
  275. std::thread thread1([&]() {
  276. LITE_make_tensor(description, &c_tensor0);
  277. LITE_get_tensor_memory(c_tensor0, &ptr0);
  278. });
  279. thread1.join();
  280. std::thread thread2([&]() {
  281. LITE_get_tensor_memory(c_tensor0, &ptr1);
  282. LITE_destroy_tensor(c_tensor0);
  283. });
  284. thread2.join();
  285. }
  286. #endif
  287. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}