You cannot select more than 25 topics. A topic must start with a Chinese character, a letter, or a number; it may include dashes ('-') and can be up to 35 characters long.

test_tensor_c.cpp 11 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316
  1. /**
  2. * \file test/test_tensor_c.cpp
  3. *
  4. * This file is part of MegEngine, a deep learning framework developed by
  5. * Megvii.
  6. *
  7. * \copyright Copyright (c) 2020-2021 Megvii Inc. All rights reserved.
  8. */
#include "lite_build_config.h"

#if LITE_BUILD_WITH_MGE
#include "../src/misc.h"
#include "lite-c/global_c.h"
#include "lite-c/tensor_c.h"

#include <gtest/gtest.h>

#include <cstdio>
#include <cstring>
#include <memory>
  16. TEST(TestCapiTensor, Basic) {
  17. LiteTensor c_tensor0, c_tensor1;
  18. LiteTensorDesc description = default_desc;
  19. LITE_make_tensor(description, &c_tensor0);
  20. int is_pinned_host = false;
  21. LITE_is_pinned_host(c_tensor0, &is_pinned_host);
  22. ASSERT_FALSE(is_pinned_host);
  23. LiteDeviceType device_type;
  24. LITE_get_tensor_device_type(c_tensor0, &device_type);
  25. ASSERT_EQ(device_type, LiteDeviceType::LITE_CPU);
  26. size_t length = 0;
  27. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  28. ASSERT_EQ(length, 0);
  29. LiteLayout layout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  30. description.device_type = LiteDeviceType::LITE_CPU;
  31. description.layout = layout;
  32. description.is_pinned_host = true;
  33. LITE_make_tensor(description, &c_tensor1);
  34. LITE_is_pinned_host(c_tensor1, &is_pinned_host);
  35. ASSERT_TRUE(is_pinned_host);
  36. LITE_get_tensor_total_size_in_byte(c_tensor1, &length);
  37. ASSERT_EQ(length, 1 * 3 * 224 * 224 * 4);
  38. LiteLayout get_layout;
  39. LITE_get_tensor_layout(c_tensor1, &get_layout);
  40. ASSERT_EQ(get_layout.ndim, layout.ndim);
  41. ASSERT_EQ(get_layout.data_type, layout.data_type);
  42. ASSERT_EQ(get_layout.shapes[0], layout.shapes[0]);
  43. ASSERT_EQ(get_layout.shapes[1], layout.shapes[1]);
  44. ASSERT_EQ(get_layout.shapes[2], layout.shapes[2]);
  45. ASSERT_EQ(get_layout.shapes[3], layout.shapes[3]);
  46. //! test error
  47. ASSERT_EQ(LITE_is_pinned_host(c_tensor0, nullptr), -1);
  48. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  49. printf("The last error is: %s\n", LITE_get_last_error());
  50. LITE_destroy_tensor(c_tensor0);
  51. LITE_destroy_tensor(c_tensor1);
  52. }
  53. TEST(TestCapiTensor, SetLayoutReAlloc) {
  54. LiteTensor c_tensor0;
  55. LiteTensorDesc description = default_desc;
  56. description.layout =
  57. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  58. LITE_make_tensor(description, &c_tensor0);
  59. void *old_ptr, *new_ptr;
  60. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  61. LiteLayout new_layout =
  62. LiteLayout{{1, 3, 100, 100}, 4, LiteDataType::LITE_INT8};
  63. LITE_set_tensor_layout(c_tensor0, new_layout);
  64. LITE_get_tensor_memory(c_tensor0, &new_ptr);
  65. size_t length = 0;
  66. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  67. ASSERT_EQ(length, 1 * 3 * 100 * 100);
  68. ASSERT_EQ(old_ptr, new_ptr);
  69. }
  70. TEST(TestCapiTensor, Reset) {
  71. LiteTensor c_tensor0, c_tensor1;
  72. LiteTensorDesc description = default_desc;
  73. description.layout = LiteLayout{{3, 20}, 2, LiteDataType::LITE_FLOAT};
  74. LITE_make_tensor(description, &c_tensor0);
  75. LITE_make_tensor(description, &c_tensor1);
  76. void *old_ptr0, *old_ptr1;
  77. LITE_get_tensor_memory(c_tensor0, &old_ptr0);
  78. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  79. //! make sure memory is allocted
  80. ASSERT_NO_THROW(memcpy(old_ptr0, old_ptr1, 3 * 20 * 4));
  81. std::shared_ptr<float> new_ptr0(new float[3 * 20],
  82. [](float* ptr) { delete[] ptr; });
  83. std::shared_ptr<float> new_ptr1(new float[3 * 20],
  84. [](float* ptr) { delete[] ptr; });
  85. LITE_reset_tensor_memory(c_tensor0, new_ptr0.get(), 3 * 20 * 4);
  86. LITE_reset_tensor_memory(c_tensor1, new_ptr1.get(), 3 * 20 * 4);
  87. void *tmp_ptr0, *tmp_ptr1;
  88. LITE_get_tensor_memory(c_tensor0, &tmp_ptr0);
  89. LITE_get_tensor_memory(c_tensor1, &tmp_ptr1);
  90. ASSERT_EQ(tmp_ptr0, new_ptr0.get());
  91. ASSERT_EQ(tmp_ptr1, new_ptr1.get());
  92. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  93. LiteLayout layout1{{6, 20}, 2, LiteDataType::LITE_FLOAT};
  94. std::shared_ptr<float> ptr2(new float[6 * 20],
  95. [](float* ptr) { delete[] ptr; });
  96. std::shared_ptr<float> ptr3(new float[6 * 20],
  97. [](float* ptr) { delete[] ptr; });
  98. LITE_reset_tensor(c_tensor0, layout1, new_ptr0.get());
  99. LITE_reset_tensor(c_tensor1, layout1, new_ptr1.get());
  100. //! memory is not freed by Tensor reset
  101. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  102. LiteLayout tmp_layout0, tmp_layout1;
  103. LITE_get_tensor_layout(c_tensor0, &tmp_layout0);
  104. LITE_get_tensor_layout(c_tensor1, &tmp_layout1);
  105. ASSERT_EQ(tmp_layout0.ndim, tmp_layout1.ndim);
  106. ASSERT_EQ(tmp_layout0.data_type, tmp_layout1.data_type);
  107. ASSERT_EQ(tmp_layout0.shapes[0], tmp_layout1.shapes[0]);
  108. ASSERT_EQ(tmp_layout0.shapes[1], tmp_layout1.shapes[1]);
  109. LITE_destroy_tensor(c_tensor0);
  110. LITE_destroy_tensor(c_tensor1);
  111. }
  112. TEST(TestCapiTensor, CrossCNCopy) {
  113. LiteTensor c_tensor0, c_tensor1, c_tensor2;
  114. LiteTensorDesc description = default_desc;
  115. LITE_make_tensor(description, &c_tensor0);
  116. description.layout =
  117. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  118. LITE_make_tensor(description, &c_tensor1);
  119. LITE_make_tensor(description, &c_tensor2);
  120. LITE_tensor_copy(c_tensor1, c_tensor2);
  121. LITE_tensor_copy(c_tensor2, c_tensor1);
  122. void *old_ptr1, *old_ptr2, *new_ptr1, *new_ptr2;
  123. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  124. LITE_get_tensor_memory(c_tensor2, &old_ptr2);
  125. //! test source tenor is empty
  126. ASSERT_EQ(LITE_tensor_copy(c_tensor1, c_tensor0), -1);
  127. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  128. printf("The last error is: %s\n", LITE_get_last_error());
  129. LITE_tensor_copy(c_tensor0, c_tensor1);
  130. LITE_tensor_copy(c_tensor1, c_tensor2);
  131. LITE_tensor_copy(c_tensor2, c_tensor0);
  132. LITE_get_tensor_memory(c_tensor1, &new_ptr1);
  133. LITE_get_tensor_memory(c_tensor2, &new_ptr2);
  134. ASSERT_EQ(old_ptr1, new_ptr1);
  135. ASSERT_EQ(old_ptr2, new_ptr2);
  136. LITE_destroy_tensor(c_tensor0);
  137. LITE_destroy_tensor(c_tensor1);
  138. LITE_destroy_tensor(c_tensor2);
  139. }
  140. TEST(TestCapiTensor, ShareMemoryWith) {
  141. LiteTensor c_tensor0, c_tensor1;
  142. LiteTensorDesc description = default_desc;
  143. LITE_make_tensor(description, &c_tensor0);
  144. description.layout =
  145. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  146. LITE_make_tensor(description, &c_tensor1);
  147. ASSERT_EQ(LITE_tensor_share_memory_with(c_tensor1, c_tensor0), -1);
  148. LITE_tensor_share_memory_with(c_tensor0, c_tensor1);
  149. void *ptr0, *ptr1;
  150. LITE_get_tensor_memory(c_tensor0, &ptr0);
  151. LITE_get_tensor_memory(c_tensor1, &ptr1);
  152. ASSERT_EQ(ptr0, ptr1);
  153. LITE_destroy_tensor(c_tensor0);
  154. LITE_destroy_tensor(c_tensor1);
  155. }
  156. TEST(TestCapiTensor, Reshape) {
  157. LiteTensor c_tensor0;
  158. LiteTensorDesc description = default_desc;
  159. description.layout =
  160. LiteLayout{{8, 8, 100, 100}, 4, LiteDataType::LITE_FLOAT};
  161. LITE_make_tensor(description, &c_tensor0);
  162. void* old_ptr;
  163. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  164. auto check = [&](std::vector<size_t> expect, const LiteTensor& tensor) {
  165. LiteLayout get_layout;
  166. LITE_get_tensor_layout(tensor, &get_layout);
  167. ASSERT_EQ(get_layout.ndim, expect.size());
  168. for (size_t i = 0; i < expect.size(); i++) {
  169. ASSERT_EQ(get_layout.shapes[i], expect[i]);
  170. }
  171. void* new_ptr;
  172. LITE_get_tensor_memory(tensor, &new_ptr);
  173. ASSERT_EQ(old_ptr, new_ptr);
  174. };
  175. {
  176. int shape[2] = {-1, 50};
  177. LITE_tensor_reshape(c_tensor0, shape, 2);
  178. check({8 * 8 * 100 * 2, 50}, c_tensor0);
  179. }
  180. {
  181. int shape[3] = {64, 100, 100};
  182. LITE_tensor_reshape(c_tensor0, shape, 3);
  183. check({8 * 8, 100, 100}, c_tensor0);
  184. }
  185. {
  186. int shape[3] = {16, 100, -1};
  187. LITE_tensor_reshape(c_tensor0, shape, 3);
  188. check({16, 100, 400}, c_tensor0);
  189. }
  190. LITE_destroy_tensor(c_tensor0);
  191. }
  192. TEST(TestCapiTensor, Slice) {
  193. LiteTensor c_tensor0;
  194. LiteTensorDesc description = default_desc;
  195. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  196. LITE_make_tensor(description, &c_tensor0);
  197. void* old_ptr;
  198. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  199. for (size_t i = 0; i < 20 * 20; i++) {
  200. *(static_cast<float*>(old_ptr) + i) = i;
  201. }
  202. auto check = [&](size_t start, size_t end, size_t step, bool have_step) {
  203. LiteTensor tensor, slice_tensor;
  204. LITE_make_tensor(default_desc, &tensor);
  205. size_t start_ptr[2] = {start, start};
  206. size_t end_ptr[2] = {end, end};
  207. size_t step_ptr[2] = {step, step};
  208. if (have_step) {
  209. LITE_tensor_slice(c_tensor0, start_ptr, end_ptr, step_ptr, 2,
  210. &slice_tensor);
  211. } else {
  212. LITE_tensor_slice(c_tensor0, start_ptr, end_ptr, nullptr, 2,
  213. &slice_tensor);
  214. }
  215. int is_continue = true;
  216. LITE_is_memory_continue(slice_tensor, &is_continue);
  217. ASSERT_FALSE(is_continue);
  218. LITE_tensor_copy(tensor, slice_tensor);
  219. void* new_ptr;
  220. LITE_get_tensor_memory(tensor, &new_ptr);
  221. float* ptr = static_cast<float*>(new_ptr);
  222. for (size_t i = start; i < end; i += step) {
  223. for (size_t j = start; j < end; j += step) {
  224. ASSERT_EQ(float(i * 20 + j), *ptr);
  225. ++ptr;
  226. }
  227. }
  228. LITE_destroy_tensor(tensor);
  229. };
  230. check(1, 8, 1, true);
  231. check(1, 8, 1, false);
  232. check(2, 10, 2, true);
  233. check(10, 18, 4, true);
  234. check(10, 18, 1, false);
  235. LITE_destroy_tensor(c_tensor0);
  236. }
  237. TEST(TestCapiTensor, Memset) {
  238. LiteTensor c_tensor0;
  239. LiteTensorDesc description = default_desc;
  240. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  241. LITE_make_tensor(description, &c_tensor0);
  242. void* ptr;
  243. uint8_t* uint8_ptr;
  244. LITE_get_tensor_memory(c_tensor0, &ptr);
  245. LITE_tensor_fill_zero(c_tensor0);
  246. uint8_ptr = static_cast<uint8_t*>(ptr);
  247. for (size_t i = 0; i < 20 * 20; i++) {
  248. ASSERT_EQ(0, *uint8_ptr);
  249. uint8_ptr++;
  250. }
  251. LITE_destroy_tensor(c_tensor0);
  252. }
  253. TEST(TestCapiTensor, GetMemoryByIndex) {
  254. LiteTensor c_tensor0;
  255. LiteTensorDesc description = default_desc;
  256. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  257. LITE_make_tensor(description, &c_tensor0);
  258. void *ptr0, *ptr1, *ptr2, *ptr3;
  259. LITE_get_tensor_memory(c_tensor0, &ptr0);
  260. size_t index0[] = {3, 4};
  261. LITE_get_tensor_memory_with_index(c_tensor0, &index0[0], 2, &ptr1);
  262. size_t index1[] = {5, 7};
  263. LITE_get_tensor_memory_with_index(c_tensor0, &index1[0], 2, &ptr2);
  264. size_t index2[] = {5};
  265. LITE_get_tensor_memory_with_index(c_tensor0, &index2[0], 1, &ptr3);
  266. ASSERT_EQ(ptr1, static_cast<float*>(ptr0) + 3 * 20 + 4);
  267. ASSERT_EQ(ptr2, static_cast<float*>(ptr0) + 5 * 20 + 7);
  268. ASSERT_EQ(ptr3, static_cast<float*>(ptr0) + 5 * 20);
  269. LITE_destroy_tensor(c_tensor0);
  270. }
  271. #endif
  272. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台