
tensor.cpp

#include "lite/tensor.h"
#include <set>
#include <string>
#include <unordered_map>
#include "../../src/tensor_impl_base.h"
#include "common.h"
#include "ipc_helper.h"
#include "lite-c/tensor_c.h"

const LiteLayout default_layout = {
        .shapes = {0, 0, 0, 0, 0}, .ndim = 0, .data_type = LiteDataType::LITE_FLOAT};

const LiteTensorDesc default_desc = {
        .is_pinned_host = false,
        .layout = default_layout,
        .device_type = LiteDeviceType::LITE_CPU,
        .device_id = 0};
namespace {
static LITE_MUTEX mtx_tensor;
//! hold a shared_ptr to every tensor handed out through the C API, so the
//! underlying lite::Tensor stays alive until LITE_destroy_tensor is called
std::unordered_map<void*, std::shared_ptr<lite::Tensor>>& get_global_tensor_holder() {
    static std::unordered_map<void*, std::shared_ptr<lite::Tensor>> global_holder;
    return global_holder;
}

static LITE_MUTEX mtx_attr;
std::unordered_map<std::string, lite::LiteAny>& get_global_tensor_attr_holder() {
    static std::unordered_map<std::string, lite::LiteAny> global_holder;
    return global_holder;
}
}  // namespace
//! convert the lite::Layout to the C LiteLayout
LiteLayout convert_to_clayout(const lite::Layout& layout) {
    LiteLayout clayout;
    clayout.ndim = layout.ndim;
    LITE_ASSERT(layout.ndim < LAYOUT_MAX_DIM, "layout ndim is too large");
    for (size_t i = 0; i < layout.ndim; i++) {
        clayout.shapes[i] = layout.shapes[i];
    }
    clayout.data_type = layout.data_type;
    return clayout;
}

//! convert the C LiteLayout to lite::Layout
lite::Layout convert_to_layout(const LiteLayout& clayout) {
    lite::Layout layout;
    layout.ndim = clayout.ndim;
    LITE_ASSERT(layout.ndim < LAYOUT_MAX_DIM, "clayout ndim is too large");
    for (size_t i = 0; i < layout.ndim; i++) {
        layout.shapes[i] = clayout.shapes[i];
    }
    layout.data_type = clayout.data_type;
    return layout;
}
int LITE_make_tensor(const LiteTensorDesc tensor_describe, LiteTensor* tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE_make_tensor is null");
    lite::Layout layout = convert_to_layout(tensor_describe.layout);
    auto lite_tensor = std::make_shared<lite::Tensor>(
            tensor_describe.device_id, tensor_describe.device_type, layout,
            tensor_describe.is_pinned_host);
    {
        LITE_LOCK_GUARD(mtx_tensor);
        get_global_tensor_holder()[lite_tensor.get()] = lite_tensor;
    }
    *tensor = lite_tensor.get();
    LITE_CAPI_END();
}

int LITE_destroy_tensor(LiteTensor tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_LOCK_GUARD(mtx_tensor);
    auto& global_holder = get_global_tensor_holder();
    if (global_holder.find(tensor) != global_holder.end()) {
        global_holder.erase(tensor);
    }
    LITE_CAPI_END();
}
int LITE_set_tensor_layout(LiteTensor tensor, const LiteLayout layout) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    auto tensor_ptr = static_cast<lite::Tensor*>(tensor);
    tensor_ptr->set_layout(convert_to_layout(layout));
    LITE_CAPI_END();
}

int LITE_reset_tensor_memory(
        LiteTensor tensor, void* prepared_data, size_t data_length_in_byte) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(prepared_data, "The prepared_data pass to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->reset(prepared_data, data_length_in_byte);
    LITE_CAPI_END();
}

int LITE_reset_tensor(LiteTensor tensor, const LiteLayout layout, void* prepared_data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(prepared_data, "The prepared_data pass to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->reset(prepared_data, convert_to_layout(layout));
    LITE_CAPI_END();
}
int LITE_tensor_reshape(LiteTensor tensor, const int* shape, int size) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && shape, "The tensor pass to LITE c_api is null");
    std::vector<int> shapes;
    for (int i = 0; i < size; i++) {
        shapes.push_back(shape[i]);
    }
    static_cast<lite::Tensor*>(tensor)->reshape(shapes);
    LITE_CAPI_END();
}

int LITE_tensor_slice(
        const LiteTensor tensor, const size_t* start, const size_t* end,
        const size_t* step, size_t size, LiteTensor* slice_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(
            tensor && start && end && slice_tensor,
            "The tensor pass to LITE c_api is null");
    std::vector<size_t> starts, ends, steps;
    for (size_t i = 0; i < size; i++) {
        starts.push_back(start[i]);
        ends.push_back(end[i]);
        if (step) {
            steps.push_back(step[i]);
        }
    }
    auto ret_tensor = static_cast<lite::Tensor*>(tensor)->slice(starts, ends, steps);
    {
        LITE_LOCK_GUARD(mtx_tensor);
        get_global_tensor_holder()[ret_tensor.get()] = ret_tensor;
    }
    *slice_tensor = ret_tensor.get();
    LITE_CAPI_END();
}
int LITE_tensor_fill_zero(LiteTensor tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    static_cast<lite::Tensor*>(tensor)->fill_zero();
    LITE_CAPI_END();
}

int LITE_tensor_copy(LiteTensor dst_tensor, const LiteTensor src_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(dst_tensor && src_tensor, "The tensor pass to LITE c_api is null");
    static_cast<lite::Tensor*>(dst_tensor)
            ->copy_from(*static_cast<lite::Tensor*>(src_tensor));
    LITE_CAPI_END();
}

int LITE_tensor_share_memory_with(LiteTensor dst_tensor, const LiteTensor src_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(dst_tensor && src_tensor, "The tensor pass to LITE c_api is null");
    static_cast<lite::Tensor*>(dst_tensor)
            ->share_memory_with(*static_cast<lite::Tensor*>(src_tensor));
    LITE_CAPI_END();
}
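//! The functions below take a second path when the process is not the IPC
//! server (fork debug mode): the arguments are packed into the shared-memory
//! buffer obtained from IPC_INSTACE(), IPC_HELP_REMOTE_CALL forwards the call
//! to the server, and the return code plus the result are read back from the
//! same buffer.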
int LITE_get_tensor_memory(const LiteTensor tensor, void** data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(data, "The data ptr pass to LITE c_api is null");
    if (ipc_imp::is_server()) {
        *data = static_cast<lite::Tensor*>(tensor)->get_memory_ptr();
    } else {
        size_t need_size = sizeof(LiteTensor);
        IPC_INSTACE().check_shm_size(need_size);
        void* raw_shm_ptr = IPC_INSTACE().get_shm_ptr(nullptr);
        char* shm_ptr_c = static_cast<char*>(raw_shm_ptr);
        memcpy(shm_ptr_c, &tensor, sizeof(LiteTensor));
        IPC_HELP_REMOTE_CALL(raw_shm_ptr, ipc::RemoteFuncId::LITE_GET_TENSOR_MEMORY);
        int* ret_ptr = static_cast<int*>(raw_shm_ptr);
        auto ret = *ret_ptr;
        ret_ptr++;
        memcpy(data, ret_ptr, sizeof(void*));
        return ret;
    }
    LITE_CAPI_END();
}
void* LITE_memset(void* s, int c, size_t n) {
    if (ipc_imp::is_server()) {
        return memset(s, c, n);
    } else {
        size_t need_size = sizeof(void*) + sizeof(int) + sizeof(size_t);
        IPC_INSTACE().check_shm_size(need_size);
        void* raw_shm_ptr = IPC_INSTACE().get_shm_ptr(nullptr);
        char* shm_ptr_c = static_cast<char*>(raw_shm_ptr);
        memcpy(shm_ptr_c, &s, sizeof(void*));
        memcpy(shm_ptr_c + sizeof(void*), &c, sizeof(int));
        memcpy(shm_ptr_c + sizeof(void*) + sizeof(int), &n, sizeof(size_t));
        IPC_HELP_REMOTE_CALL(raw_shm_ptr, ipc::RemoteFuncId::LITE_MEMSET);
        return s;
    }
}
int LITE_copy_server_tensor_memory(
        void* server_ptr, void* client_ptr, size_t size_in_byte) {
    LITE_CAPI_BEGIN();
    if (ipc_imp::is_server()) {
        LITE_ASSERT(
                false, "lite not in fork debug mode, please do not call this function");
    } else {
        size_t need_size = sizeof(void*) + sizeof(size_t);
        IPC_INSTACE().check_shm_size(need_size);
        IPC_INSTACE().check_shm_size(size_in_byte);
        void* raw_shm_ptr = IPC_INSTACE().get_shm_ptr(nullptr);
        char* shm_ptr_c = static_cast<char*>(raw_shm_ptr);
        memcpy(shm_ptr_c, &server_ptr, sizeof(void*));
        memcpy(shm_ptr_c + sizeof(void*), &size_in_byte, sizeof(size_t));
        IPC_HELP_REMOTE_CALL(
                raw_shm_ptr, ipc::RemoteFuncId::LITE_COPY_SERVER_TENSOR_MEMORY);
        memcpy(client_ptr, raw_shm_ptr, size_in_byte);
        return 0;
    }
    LITE_CAPI_END();
}
int LITE_get_tensor_memory_with_index(
        const LiteTensor tensor, const size_t* index, size_t size, void** data) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && index && data, "The tensor pass to LITE c_api is null");
    std::vector<size_t> index_v;
    for (size_t i = 0; i < size; i++) {
        index_v.push_back(index[i]);
    }
    *data = static_cast<lite::Tensor*>(tensor)->get_memory_ptr(index_v);
    LITE_CAPI_END();
}

int LITE_get_tensor_total_size_in_byte(const LiteTensor tensor, size_t* size) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(size, "The size ptr pass to LITE c_api is null");
    if (ipc_imp::is_server()) {
        *size = static_cast<lite::Tensor*>(tensor)->get_tensor_total_size_in_byte();
    } else {
        size_t need_size = sizeof(LiteTensor);
        IPC_INSTACE().check_shm_size(need_size);
        void* raw_shm_ptr = IPC_INSTACE().get_shm_ptr(nullptr);
        char* shm_ptr_c = static_cast<char*>(raw_shm_ptr);
        memcpy(shm_ptr_c, &tensor, sizeof(LiteTensor));
        IPC_HELP_REMOTE_CALL(
                raw_shm_ptr, ipc::RemoteFuncId::LITE_GET_TENSOR_TOTAL_SIZE_IN_BYTE);
        int* ret_ptr = static_cast<int*>(raw_shm_ptr);
        auto ret = *ret_ptr;
        ret_ptr++;
        memcpy(size, ret_ptr, sizeof(size_t));
        return ret;
    }
    LITE_CAPI_END();
}
int LITE_get_tensor_layout(const LiteTensor tensor, LiteLayout* layout) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(layout, "The layout ptr pass to LITE c_api is null");
    *layout = convert_to_clayout(static_cast<lite::Tensor*>(tensor)->get_layout());
    LITE_CAPI_END();
}

int LITE_get_tensor_device_type(const LiteTensor tensor, LiteDeviceType* device_type) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(device_type, "The device ptr pass to LITE c_api is null");
    *device_type = static_cast<lite::Tensor*>(tensor)->get_device_type();
    LITE_CAPI_END();
}

int LITE_get_tensor_device_id(const LiteTensor tensor, int* device_id) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor && device_id, "The tensor pass to LITE c_api is null");
    *device_id = static_cast<lite::Tensor*>(tensor)->get_device_id();
    LITE_CAPI_END();
}

int LITE_is_pinned_host(const LiteTensor tensor, int* is_pinned_host) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(is_pinned_host, "The is_pinned_host ptr pass to LITE c_api is null");
    *is_pinned_host = static_cast<lite::Tensor*>(tensor)->is_pinned_host();
    LITE_CAPI_END();
}

int LITE_is_memory_continue(const LiteTensor tensor, int* is_continue) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(tensor, "The tensor pass to LITE c_api is null");
    LITE_ASSERT(is_continue, "The is_continue ptr pass to LITE c_api is null");
    *is_continue = static_cast<lite::Tensor*>(tensor)->is_continue_memory();
    LITE_CAPI_END();
}
int LITE_tensor_concat(
        LiteTensor* tensors, int nr_tensor, int dim, LiteDeviceType dst_device,
        int device_id, LiteTensor* result_tensor) {
    LITE_CAPI_BEGIN();
    LITE_ASSERT(result_tensor, "The tensor pass to LITE c_api is null");
    std::vector<lite::Tensor> v_tensors;
    for (int i = 0; i < nr_tensor; i++) {
        v_tensors.push_back(*static_cast<lite::Tensor*>(tensors[i]));
    }
    auto tensor = lite::TensorUtils::concat(v_tensors, dim, dst_device, device_id);
    {
        LITE_LOCK_GUARD(mtx_tensor);
        get_global_tensor_holder()[tensor.get()] = tensor;
    }
    *result_tensor = tensor.get();
    LITE_CAPI_END()
}
  290. // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
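
For reference, below is a minimal client-side sketch of the lifecycle these wrappers implement: build a LiteLayout and LiteTensorDesc, create the tensor, attach user-owned memory, query the data pointer, and release the handle. The snippet is not part of the file; it uses only the entry points declared above and assumes the usual convention that the wrappers return 0 on success.

// Usage sketch (not part of tensor.cpp); assumes the C declarations from
// "lite-c/tensor_c.h" and that each wrapper returns 0 on success.
#include <cstdio>
#include <vector>
#include "lite-c/tensor_c.h"

int main() {
    // describe a 1x3x224x224 float tensor on the CPU
    LiteLayout layout;
    layout.ndim = 4;
    layout.shapes[0] = 1;
    layout.shapes[1] = 3;
    layout.shapes[2] = 224;
    layout.shapes[3] = 224;
    layout.data_type = LiteDataType::LITE_FLOAT;

    LiteTensorDesc desc;
    desc.is_pinned_host = false;
    desc.layout = layout;
    desc.device_type = LiteDeviceType::LITE_CPU;
    desc.device_id = 0;

    LiteTensor tensor = nullptr;
    if (LITE_make_tensor(desc, &tensor) != 0) {
        std::printf("LITE_make_tensor failed\n");
        return -1;
    }

    // hand user-owned memory to the tensor instead of letting it allocate
    std::vector<float> buffer(1 * 3 * 224 * 224, 0.f);
    LITE_reset_tensor_memory(tensor, buffer.data(), buffer.size() * sizeof(float));

    // read the data pointer back through the C API
    void* ptr = nullptr;
    LITE_get_tensor_memory(tensor, &ptr);
    std::printf("tensor memory at %p\n", ptr);

    // drop the shared_ptr held in the global tensor holder
    LITE_destroy_tensor(tensor);
    return 0;
}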