You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they may include dashes ('-') and can be up to 35 characters long.

test_tensor_c.cpp 11 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318
  1. /**
  2. * \file test/test_tensor_c.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "lite_build_config.h"
  12. #if LITE_BUILD_WITH_MGE
  13. #include "../src/misc.h"
  14. #include "lite-c/global_c.h"
  15. #include "lite-c/tensor_c.h"
  16. #include <gtest/gtest.h>
  17. #include <memory>
  18. TEST(TestCapiTensor, Basic) {
  19. LiteTensor c_tensor0, c_tensor1;
  20. LiteTensorDesc description = default_desc;
  21. LITE_make_tensor(description, &c_tensor0);
  22. int is_pinned_host = false;
  23. LITE_is_pinned_host(c_tensor0, &is_pinned_host);
  24. ASSERT_FALSE(is_pinned_host);
  25. LiteDeviceType device_type;
  26. LITE_get_tensor_device_type(c_tensor0, &device_type);
  27. ASSERT_EQ(device_type, LiteDeviceType::LITE_CPU);
  28. size_t length = 0;
  29. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  30. ASSERT_EQ(length, 0);
  31. LiteLayout layout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  32. description.device_type = LiteDeviceType::LITE_CPU;
  33. description.layout = layout;
  34. description.is_pinned_host = true;
  35. LITE_make_tensor(description, &c_tensor1);
  36. LITE_is_pinned_host(c_tensor1, &is_pinned_host);
  37. ASSERT_TRUE(is_pinned_host);
  38. LITE_get_tensor_total_size_in_byte(c_tensor1, &length);
  39. ASSERT_EQ(length, 1 * 3 * 224 * 224 * 4);
  40. LiteLayout get_layout;
  41. LITE_get_tensor_layout(c_tensor1, &get_layout);
  42. ASSERT_EQ(get_layout.ndim, layout.ndim);
  43. ASSERT_EQ(get_layout.data_type, layout.data_type);
  44. ASSERT_EQ(get_layout.shapes[0], layout.shapes[0]);
  45. ASSERT_EQ(get_layout.shapes[1], layout.shapes[1]);
  46. ASSERT_EQ(get_layout.shapes[2], layout.shapes[2]);
  47. ASSERT_EQ(get_layout.shapes[3], layout.shapes[3]);
  48. //! test error
  49. ASSERT_EQ(LITE_is_pinned_host(c_tensor0, nullptr), -1);
  50. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  51. printf("The last error is: %s\n", LITE_get_last_error());
  52. LITE_destroy_tensor(c_tensor0);
  53. LITE_destroy_tensor(c_tensor1);
  54. }
  55. TEST(TestCapiTensor, SetLayoutReAlloc) {
  56. LiteTensor c_tensor0;
  57. LiteTensorDesc description = default_desc;
  58. description.layout =
  59. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  60. LITE_make_tensor(description, &c_tensor0);
  61. void *old_ptr, *new_ptr;
  62. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  63. LiteLayout new_layout =
  64. LiteLayout{{1, 3, 100, 100}, 4, LiteDataType::LITE_INT8};
  65. LITE_set_tensor_layout(c_tensor0, new_layout);
  66. LITE_get_tensor_memory(c_tensor0, &new_ptr);
  67. size_t length = 0;
  68. LITE_get_tensor_total_size_in_byte(c_tensor0, &length);
  69. ASSERT_EQ(length, 1 * 3 * 100 * 100);
  70. ASSERT_EQ(old_ptr, new_ptr);
  71. }
  72. TEST(TestCapiTensor, Reset) {
  73. LiteTensor c_tensor0, c_tensor1;
  74. LiteTensorDesc description = default_desc;
  75. description.layout = LiteLayout{{3, 20}, 2, LiteDataType::LITE_FLOAT};
  76. LITE_make_tensor(description, &c_tensor0);
  77. LITE_make_tensor(description, &c_tensor1);
  78. void *old_ptr0, *old_ptr1;
  79. LITE_get_tensor_memory(c_tensor0, &old_ptr0);
  80. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  81. //! make sure memory is allocted
  82. ASSERT_NO_THROW(memcpy(old_ptr0, old_ptr1, 3 * 20 * 4));
  83. std::shared_ptr<float> new_ptr0(new float[3 * 20],
  84. [](float* ptr) { delete[] ptr; });
  85. std::shared_ptr<float> new_ptr1(new float[3 * 20],
  86. [](float* ptr) { delete[] ptr; });
  87. LITE_reset_tensor_memory(c_tensor0, new_ptr0.get(), 3 * 20 * 4);
  88. LITE_reset_tensor_memory(c_tensor1, new_ptr1.get(), 3 * 20 * 4);
  89. void *tmp_ptr0, *tmp_ptr1;
  90. LITE_get_tensor_memory(c_tensor0, &tmp_ptr0);
  91. LITE_get_tensor_memory(c_tensor1, &tmp_ptr1);
  92. ASSERT_EQ(tmp_ptr0, new_ptr0.get());
  93. ASSERT_EQ(tmp_ptr1, new_ptr1.get());
  94. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  95. LiteLayout layout1{{6, 20}, 2, LiteDataType::LITE_FLOAT};
  96. std::shared_ptr<float> ptr2(new float[6 * 20],
  97. [](float* ptr) { delete[] ptr; });
  98. std::shared_ptr<float> ptr3(new float[6 * 20],
  99. [](float* ptr) { delete[] ptr; });
  100. LITE_reset_tensor(c_tensor0, layout1, new_ptr0.get());
  101. LITE_reset_tensor(c_tensor1, layout1, new_ptr1.get());
  102. //! memory is not freed by Tensor reset
  103. ASSERT_NO_THROW(memcpy(new_ptr0.get(), new_ptr1.get(), 3 * 20 * 4));
  104. LiteLayout tmp_layout0, tmp_layout1;
  105. LITE_get_tensor_layout(c_tensor0, &tmp_layout0);
  106. LITE_get_tensor_layout(c_tensor1, &tmp_layout1);
  107. ASSERT_EQ(tmp_layout0.ndim, tmp_layout1.ndim);
  108. ASSERT_EQ(tmp_layout0.data_type, tmp_layout1.data_type);
  109. ASSERT_EQ(tmp_layout0.shapes[0], tmp_layout1.shapes[0]);
  110. ASSERT_EQ(tmp_layout0.shapes[1], tmp_layout1.shapes[1]);
  111. LITE_destroy_tensor(c_tensor0);
  112. LITE_destroy_tensor(c_tensor1);
  113. }
  114. TEST(TestCapiTensor, CrossCNCopy) {
  115. LiteTensor c_tensor0, c_tensor1, c_tensor2;
  116. LiteTensorDesc description = default_desc;
  117. LITE_make_tensor(description, &c_tensor0);
  118. description.layout =
  119. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  120. LITE_make_tensor(description, &c_tensor1);
  121. LITE_make_tensor(description, &c_tensor2);
  122. LITE_tensor_copy(c_tensor1, c_tensor2);
  123. LITE_tensor_copy(c_tensor2, c_tensor1);
  124. void *old_ptr1, *old_ptr2, *new_ptr1, *new_ptr2;
  125. LITE_get_tensor_memory(c_tensor1, &old_ptr1);
  126. LITE_get_tensor_memory(c_tensor2, &old_ptr2);
  127. //! test source tenor is empty
  128. ASSERT_EQ(LITE_tensor_copy(c_tensor1, c_tensor0), -1);
  129. ASSERT_NE(strlen(LITE_get_last_error()), 0);
  130. printf("The last error is: %s\n", LITE_get_last_error());
  131. LITE_tensor_copy(c_tensor0, c_tensor1);
  132. LITE_tensor_copy(c_tensor1, c_tensor2);
  133. LITE_tensor_copy(c_tensor2, c_tensor0);
  134. LITE_get_tensor_memory(c_tensor1, &new_ptr1);
  135. LITE_get_tensor_memory(c_tensor2, &new_ptr2);
  136. ASSERT_EQ(old_ptr1, new_ptr1);
  137. ASSERT_EQ(old_ptr2, new_ptr2);
  138. LITE_destroy_tensor(c_tensor0);
  139. LITE_destroy_tensor(c_tensor1);
  140. LITE_destroy_tensor(c_tensor2);
  141. }
  142. TEST(TestCapiTensor, ShareMemoryWith) {
  143. LiteTensor c_tensor0, c_tensor1;
  144. LiteTensorDesc description = default_desc;
  145. LITE_make_tensor(description, &c_tensor0);
  146. description.layout =
  147. LiteLayout{{1, 3, 224, 224}, 4, LiteDataType::LITE_FLOAT};
  148. LITE_make_tensor(description, &c_tensor1);
  149. ASSERT_EQ(LITE_tensor_share_memory_with(c_tensor1, c_tensor0), -1);
  150. LITE_tensor_share_memory_with(c_tensor0, c_tensor1);
  151. void *ptr0, *ptr1;
  152. LITE_get_tensor_memory(c_tensor0, &ptr0);
  153. LITE_get_tensor_memory(c_tensor1, &ptr1);
  154. ASSERT_EQ(ptr0, ptr1);
  155. LITE_destroy_tensor(c_tensor0);
  156. LITE_destroy_tensor(c_tensor1);
  157. }
  158. TEST(TestCapiTensor, Reshape) {
  159. LiteTensor c_tensor0;
  160. LiteTensorDesc description = default_desc;
  161. description.layout =
  162. LiteLayout{{8, 8, 100, 100}, 4, LiteDataType::LITE_FLOAT};
  163. LITE_make_tensor(description, &c_tensor0);
  164. void* old_ptr;
  165. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  166. auto check = [&](std::vector<size_t> expect, const LiteTensor& tensor) {
  167. LiteLayout get_layout;
  168. LITE_get_tensor_layout(tensor, &get_layout);
  169. ASSERT_EQ(get_layout.ndim, expect.size());
  170. for (size_t i = 0; i < expect.size(); i++) {
  171. ASSERT_EQ(get_layout.shapes[i], expect[i]);
  172. }
  173. void* new_ptr;
  174. LITE_get_tensor_memory(tensor, &new_ptr);
  175. ASSERT_EQ(old_ptr, new_ptr);
  176. };
  177. {
  178. int shape[2] = {-1, 50};
  179. LITE_tensor_reshape(c_tensor0, shape, 2);
  180. check({8 * 8 * 100 * 2, 50}, c_tensor0);
  181. }
  182. {
  183. int shape[3] = {64, 100, 100};
  184. LITE_tensor_reshape(c_tensor0, shape, 3);
  185. check({8 * 8, 100, 100}, c_tensor0);
  186. }
  187. {
  188. int shape[3] = {16, 100, -1};
  189. LITE_tensor_reshape(c_tensor0, shape, 3);
  190. check({16, 100, 400}, c_tensor0);
  191. }
  192. LITE_destroy_tensor(c_tensor0);
  193. }
  194. TEST(TestCapiTensor, Slice) {
  195. LiteTensor c_tensor0;
  196. LiteTensorDesc description = default_desc;
  197. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  198. LITE_make_tensor(description, &c_tensor0);
  199. void* old_ptr;
  200. LITE_get_tensor_memory(c_tensor0, &old_ptr);
  201. for (size_t i = 0; i < 20 * 20; i++) {
  202. *(static_cast<float*>(old_ptr) + i) = i;
  203. }
  204. auto check = [&](size_t start, size_t end, size_t step, bool have_step) {
  205. LiteTensor tensor, slice_tensor;
  206. LITE_make_tensor(default_desc, &tensor);
  207. size_t start_ptr[2] = {start, start};
  208. size_t end_ptr[2] = {end, end};
  209. size_t step_ptr[2] = {step, step};
  210. if (have_step) {
  211. LITE_tensor_slice(c_tensor0, start_ptr, end_ptr, step_ptr, 2,
  212. &slice_tensor);
  213. } else {
  214. LITE_tensor_slice(c_tensor0, start_ptr, end_ptr, nullptr, 2,
  215. &slice_tensor);
  216. }
  217. int is_continue = true;
  218. LITE_is_memory_continue(slice_tensor, &is_continue);
  219. ASSERT_FALSE(is_continue);
  220. LITE_tensor_copy(tensor, slice_tensor);
  221. void* new_ptr;
  222. LITE_get_tensor_memory(tensor, &new_ptr);
  223. float* ptr = static_cast<float*>(new_ptr);
  224. for (size_t i = start; i < end; i += step) {
  225. for (size_t j = start; j < end; j += step) {
  226. ASSERT_EQ(float(i * 20 + j), *ptr);
  227. ++ptr;
  228. }
  229. }
  230. LITE_destroy_tensor(tensor);
  231. };
  232. check(1, 8, 1, true);
  233. check(1, 8, 1, false);
  234. check(2, 10, 2, true);
  235. check(10, 18, 4, true);
  236. check(10, 18, 1, false);
  237. LITE_destroy_tensor(c_tensor0);
  238. }
  239. TEST(TestCapiTensor, Memset) {
  240. LiteTensor c_tensor0;
  241. LiteTensorDesc description = default_desc;
  242. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  243. LITE_make_tensor(description, &c_tensor0);
  244. void* ptr;
  245. uint8_t* uint8_ptr;
  246. LITE_get_tensor_memory(c_tensor0, &ptr);
  247. LITE_tensor_fill_zero(c_tensor0);
  248. uint8_ptr = static_cast<uint8_t*>(ptr);
  249. for (size_t i = 0; i < 20 * 20; i++) {
  250. ASSERT_EQ(0, *uint8_ptr);
  251. uint8_ptr++;
  252. }
  253. LITE_destroy_tensor(c_tensor0);
  254. }
  255. TEST(TestCapiTensor, GetMemoryByIndex) {
  256. LiteTensor c_tensor0;
  257. LiteTensorDesc description = default_desc;
  258. description.layout = LiteLayout{{20, 20}, 2, LiteDataType::LITE_FLOAT};
  259. LITE_make_tensor(description, &c_tensor0);
  260. void *ptr0, *ptr1, *ptr2, *ptr3;
  261. LITE_get_tensor_memory(c_tensor0, &ptr0);
  262. size_t index0[] = {3, 4};
  263. LITE_get_tensor_memory_with_index(c_tensor0, &index0[0], 2, &ptr1);
  264. size_t index1[] = {5, 7};
  265. LITE_get_tensor_memory_with_index(c_tensor0, &index1[0], 2, &ptr2);
  266. size_t index2[] = {5};
  267. LITE_get_tensor_memory_with_index(c_tensor0, &index2[0], 1, &ptr3);
  268. ASSERT_EQ(ptr1, static_cast<float*>(ptr0) + 3 * 20 + 4);
  269. ASSERT_EQ(ptr2, static_cast<float*>(ptr0) + 5 * 20 + 7);
  270. ASSERT_EQ(ptr3, static_cast<float*>(ptr0) + 5 * 20);
  271. LITE_destroy_tensor(c_tensor0);
  272. }
  273. #endif
  274. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台