
single_op_task_unittest.cc
/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>
#include <vector>

#include "graph/load/model_manager/model_utils.h"
#include "graph/utils/graph_utils.h"
#include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
#include "runtime/rt.h"

#define protected public
#define private public
#include "single_op/single_op_model.h"
#include "single_op/task/tbe_task_builder.h"
#include "single_op/task/op_task.h"
#include "external/register/op_tiling_registry.h"
#undef private
#undef protected
#include "tests/depends/runtime/src/runtime_stub.h"

using namespace std;
using namespace testing;
using namespace ge;
using namespace optiling;
class UtestSingleOpTask : public testing::Test {
 protected:
  void SetUp() {
    RTS_STUB_SETUP();
  }

  void TearDown() {
    RTS_STUB_TEARDOWN();
  }
};
TEST_F(UtestSingleOpTask, test_build_kernel_task) {
  string model_data_str = "123456789";
  SingleOpModel model("model", model_data_str.c_str(), model_data_str.size());
  model.input_offset_list_.push_back(0);
  model.input_sizes_.push_back(16);
  model.output_offset_list_.push_back(0);
  model.output_sizes_.push_back(16);

  // Build an "Add" node carrying a TBE kernel binary and kernel name.
  auto graph = make_shared<ComputeGraph>("graph");
  auto op_desc = make_shared<OpDesc>("Add", "Add");
  AttrUtils::SetStr(op_desc, TVM_ATTR_NAME_MAGIC, "RT_DEV_BINARY_MAGIC_ELF");
  std::vector<char> kernelBin;
  TBEKernelPtr tbe_kernel = std::make_shared<ge::OpKernelBin>("name/Add", std::move(kernelBin));
  op_desc->SetExtAttr(ge::OP_EXTATTR_NAME_TBE_KERNEL, tbe_kernel);
  std::string kernel_name("kernel/Add");
  AttrUtils::SetStr(op_desc, op_desc->GetName() + "_kernelname", kernel_name);

  vector<int64_t> shape{16, 16};
  GeShape ge_shape(shape);
  GeTensorDesc desc(ge_shape);
  op_desc->AddInputDesc(desc);
  op_desc->AddOutputDesc(desc);
  auto node = graph->AddNode(op_desc);

  std::mutex stream_mu_;
  rtStream_t stream_ = nullptr;
  StreamResource stream_resource(0);
  SingleOp single_op(&stream_resource, &stream_mu_, stream_);

  // Assemble a kernel-with-handle task definition pointing at the node above.
  domi::TaskDef task_def;
  task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
  domi::KernelDefWithHandle *kernel_with_handle = task_def.mutable_kernel_with_handle();
  kernel_with_handle->set_original_kernel_key("");
  kernel_with_handle->set_node_info("");
  kernel_with_handle->set_block_dim(32);
  kernel_with_handle->set_args_size(64);
  string args(64, '1');
  kernel_with_handle->set_args(args.data(), 64);
  domi::KernelContext *context = kernel_with_handle->mutable_context();
  context->set_op_index(1);
  context->set_kernel_type(2);  // ccKernelType::TE
  uint16_t args_offset[9] = {0};
  context->set_args_offset(args_offset, 9 * sizeof(uint16_t));
  model.op_list_[1] = node;

  TbeOpTask task_tmp;
  TbeOpTask *task = &task_tmp;
  ASSERT_EQ(model.BuildKernelTask(task_def, &task), SUCCESS);

  ge::DataBuffer data_buffer;
  vector<GeTensorDesc> input_desc;
  vector<DataBuffer> input_buffers = { data_buffer };
  vector<GeTensorDesc> output_desc;
  vector<DataBuffer> output_buffers = { data_buffer };
  task->node_ = node;
  OpTilingFunc op_tiling_func = [](const TeOpParas &, const OpCompileInfo &, OpRunInfo &) -> bool { return true; };
  OpTilingRegistryInterf("Add", op_tiling_func);
  ge::AttrUtils::SetStr(op_desc, "compile_info_key", "op_compile_info_key");
  ge::AttrUtils::SetStr(op_desc, "compile_info_json", "op_compile_info_json");
  char c = '0';
  char *buffer = &c;
  task->tiling_buffer_ = buffer;
  task->max_tiling_size_ = 64;
  task->tiling_data_ = "tiling_data";
  task->arg_size_ = 64;
  task->args_.reset(new (std::nothrow) uint8_t[sizeof(void *) * 3]);
  ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);

  // Launch again with an explicit kernel handle set.
  char *handle = const_cast<char *>("00");
  task->SetHandle(handle);
  ASSERT_EQ(task->LaunchKernel(input_desc, input_buffers, output_desc, output_buffers, stream_), SUCCESS);
}
TEST_F(UtestSingleOpTask, test_update_ioaddr) {
  auto graph = make_shared<ComputeGraph>("graph");
  auto op_desc = make_shared<OpDesc>("Add", "Add");

  GeTensorDesc desc;
  op_desc->AddInputDesc(desc);
  op_desc->AddInputDesc(desc);
  op_desc->AddOutputDesc(desc);
  vector<bool> is_input_const = { true, false };
  op_desc->SetIsInputConst(is_input_const);
  auto node = graph->AddNode(op_desc);

  TbeOpTask task;
  task.op_desc_ = op_desc;
  task.node_ = node;
  ASSERT_EQ(task.SetArgIndex(), SUCCESS);
  task.arg_size_ = sizeof(void *) * 4;
  task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]);
  task.arg_index_ = {0};
  task.input_num_ = 2;
  task.output_num_ = 1;

  vector<void *> args;
  vector<DataBuffer> inputs;
  vector<DataBuffer> outputs;
  ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);

  ge::DataBuffer data_buffer;
  inputs = { data_buffer };
  outputs = { data_buffer };
  ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS);

  task.tiling_buffer_ = (void *)0x0001;
  task.workspaces_ = { (void *)0x0002 };
  ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS);
  task.tiling_buffer_ = nullptr;
}
TEST_F(UtestSingleOpTask, test_atomic_exec) {
  auto graph = make_shared<ComputeGraph>("graph");
  auto op_desc = make_shared<OpDesc>("Add", "Add");
  GeTensorDesc desc;
  op_desc->AddInputDesc(desc);
  op_desc->AddOutputDesc(desc);
  auto node = graph->AddNode(op_desc);

  AtomicAddrCleanOpTask task;
  task.op_desc_ = op_desc;
  task.node_ = node;
  vector<DataBuffer> inputs;
  vector<DataBuffer> outputs;

  std::vector<int64_t> atomic_output_indices;
  ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
  atomic_output_indices = { 0 };
  ge::AttrUtils::SetListInt(op_desc, ATOMIC_ATTR_OUTPUT_INDEX, atomic_output_indices);
  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), INTERNAL_ERROR);
  task.arg_size_ = sizeof(void *) * 2;
  task.args_.reset(new (std::nothrow) uint8_t[task.arg_size_]);
  ASSERT_EQ(task.InitAtomicAddrCleanIndices(), SUCCESS);

  ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), ACL_ERROR_GE_PARAM_INVALID);
  ge::DataBuffer data_buffer;
  outputs = { data_buffer };
  ASSERT_EQ(task.UpdateIoAddr(inputs, outputs), SUCCESS);

  task.tiling_buffer_ = (void *)0x0001;
  ASSERT_EQ(task.UpdateTilingArgs(nullptr), SUCCESS);
  task.tiling_buffer_ = nullptr;

  optiling::utils::OpRunInfo run_info(0, true, 0);
  task.CalcTilingInfo(run_info);
}
TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
  AiCpuCCTask task;
  task.num_inputs_ = 2;
  task.num_outputs_ = 1;
  task.input_is_const_ = {true, false};
  const int total_addr = 3;
  uint32_t *addrs[total_addr] = {nullptr, nullptr, nullptr};
  task.io_addr_ = reinterpret_cast<uintptr_t *>(addrs);
  task.io_addr_num_ = total_addr;

  {
    // Default (empty) buffers: UpdateIoAddr succeeds and leaves all addresses untouched.
    vector<DataBuffer> inputs(1, DataBuffer());
    vector<DataBuffer> outputs(1, DataBuffer());
    auto ret = task.UpdateIoAddr(inputs, outputs);
    ASSERT_EQ(ret, SUCCESS);
    ASSERT_EQ(addrs[0], nullptr);
    ASSERT_EQ(addrs[1], nullptr);
    ASSERT_EQ(addrs[2], nullptr);
  }

  {
    // Valid buffers: only the non-const input and the output addresses are refreshed.
    uint32_t data_buf[2];
    vector<DataBuffer> inputs{DataBuffer(&data_buf[0], 4, false)};
    vector<DataBuffer> outputs{DataBuffer(&data_buf[1], 4, false)};
    auto ret = task.UpdateIoAddr(inputs, outputs);
    ASSERT_EQ(ret, SUCCESS);
    ASSERT_EQ(addrs[0], nullptr);
    ASSERT_EQ(addrs[1], &data_buf[0]);
    ASSERT_EQ(addrs[2], &data_buf[1]);
  }

  {
    // A null input buffer with a non-zero length is rejected.
    uint32_t data_buf[2];
    vector<DataBuffer> inputs{DataBuffer(nullptr, 4, false)};
    vector<DataBuffer> outputs{DataBuffer(&data_buf[1], 4, false)};
    auto ret = task.UpdateIoAddr(inputs, outputs);
    ASSERT_EQ(ret, PARAM_INVALID);
  }

  {
    // A null output buffer with a non-zero length is rejected.
    uint32_t data_buf[2];
    vector<DataBuffer> inputs{DataBuffer(&data_buf[0], 4, false)};
    vector<DataBuffer> outputs{DataBuffer(nullptr, 4, false)};
    auto ret = task.UpdateIoAddr(inputs, outputs);
    ASSERT_EQ(ret, PARAM_INVALID);
  }
}
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_01) {
  // Build an aicpu ext-info blob carrying an async-wait section for a blocking op.
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);

  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuCCTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_02) {
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);

  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}
TEST_F(UtestSingleOpTask, test_blocking_aicpu_op_fail) {
  int len = sizeof(hybrid::AicpuExtInfo) + sizeof(hybrid::AsyncWaitInfo);
  vector<char> aicpu_ext_info(len, 0);
  char *buf = aicpu_ext_info.data();
  int offset = 0;
  hybrid::AicpuExtInfo *ext_info = reinterpret_cast<hybrid::AicpuExtInfo *>(buf + offset);
  ext_info->infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_ASYNCWAIT;
  ext_info->infoLen = sizeof(hybrid::AsyncWaitInfo);
  offset += sizeof(hybrid::AicpuExtInfo);
  hybrid::AsyncWaitInfo *async_wait_info = reinterpret_cast<hybrid::AsyncWaitInfo *>(buf + offset);
  async_wait_info->waitType = 0;
  async_wait_info->waitId = 0;
  async_wait_info->timeOut = 0;
  async_wait_info->reserved = 0;

  domi::KernelDef kernel_def;
  kernel_def.set_kernel_ext_info(buf, len);
  kernel_def.set_kernel_ext_info_size(len);

  auto op_desc = make_shared<OpDesc>("deque", "Deque");
  ge::AttrUtils::SetBool(op_desc, ATTR_NAME_IS_BLOCKING_OP, true);
  AiCpuTask aicpu_task;
  aicpu_task.SetOpDesc(op_desc);
  rtStream_t stream;
  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);

  // Stubbed runtime failures should propagate as FAILED.
  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_SUPPORT + 1);
  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), FAILED);

  RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtStreamWaitEvent, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  ASSERT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtEventReset, rtError_t, 0x78000001);
  ASSERT_EQ(aicpu_task.LaunchKernel(stream), FAILED);

  // When the device reports that blocking ops are not supported, setup and launch still succeed.
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.SetExtInfoAndType(kernel_def.kernel_ext_info(), 0), SUCCESS);
  RTS_STUB_RETURN_VALUE(rtGetDeviceCapability, rtError_t, RT_ERROR_NONE);
  RTS_STUB_OUTBOUND_VALUE(rtGetDeviceCapability, int32_t, value, RT_AICPU_BLOCKING_OP_NOT_SUPPORT);
  EXPECT_EQ(aicpu_task.LaunchKernel(stream), SUCCESS);
}

The Graph Engine (GE) module is a submodule of MindSpore. Its code is implemented in C++ and sits between the front-end module ME and the underlying hardware, acting as a bridge between the two. GE takes the graph delivered by ME as input, applies a series of deep graph-optimization passes, and finally outputs a graph that runs efficiently on the underlying hardware. GE is optimized specifically for the hardware architecture of the Ascend AI processor so as to fully exploit its compute power. During model training and inference, GE is invoked automatically and is transparent to the user. GE consists mainly of two parts, GE API and GE Core; the detailed architecture diagram follows.
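Alongside that architecture description, a minimal, hypothetical sketch of how the GE API layer is typically driven (based on the GEInitialize/Session/GEFinalize entry points declared in this repository's ge_api.h) might look as follows; the empty options, empty graph, and graph id below are illustrative assumptions, not the project's recommended usage.

#include <map>
#include <string>
#include <vector>
#include "ge/ge_api.h"  // GE API layer: GEInitialize, Session, GEFinalize

int main() {
  // Empty global options are an assumption for this sketch; real deployments pass Ascend-specific options.
  std::map<std::string, std::string> options;
  if (ge::GEInitialize(options) != ge::SUCCESS) {
    return -1;
  }
  {
    // A Session owns graphs: AddGraph registers a ge::Graph (normally produced by the
    // front end, ME) and RunGraph hands it to GE Core for optimization and execution.
    ge::Session session(options);
    ge::Graph graph("example_graph");  // placeholder empty graph for illustration
    (void)session.AddGraph(1, graph);  // graph id 1 is arbitrary in this sketch
    std::vector<ge::Tensor> inputs;
    std::vector<ge::Tensor> outputs;
    (void)session.RunGraph(1, inputs, outputs);
  }
  (void)ge::GEFinalize();
  return 0;
}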