diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
index f309ebd0..386726c8 100755
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc
@@ -548,8 +548,8 @@ Status AicpuTfNodeTask::EnsureSessionCreated(uint64_t session_id) {
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                                          std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                                            std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   for (auto i = 0; i < node_item_->num_outputs; ++i) {
     auto &result_summary = output_summary_host_[i];
     GE_CHK_RT_RET(rtMemcpy(&result_summary, sizeof(aicpu::FWKAdapter::ResultSummary),
@@ -574,6 +574,30 @@ Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
   return SUCCESS;
 }
 
+Status AicpuNodeTask::CopyDataToHbm(TaskContext &context,
+                                    const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+  GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<size_t>(node_item_->num_outputs),
+                         INTERNAL_ERROR,
+                         "[Check][Size]Node[%s] has %d outputs but out shape is %zu not equal.",
+                         node_name_.c_str(), node_item_->num_outputs, out_shape_hbm.size());
+
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm));
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
+  auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(memcpy_so_name_.c_str()),
+                                          reinterpret_cast<const void *>(memcpy_kernel_name_.c_str()),
+                                          1,  // default core dim is 1
+                                          memcpy_args_.get(), memcpy_args_size_,
+                                          nullptr, context.GetStream(), RT_KERNEL_DEFAULT);
+  GE_CHK_RT_RET(rt_ret);
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");
+
+  GE_CHK_RT_RET(rtStreamSynchronize(context.GetStream()));
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[SynchronizeCopy] End");
+  return SUCCESS;
+}
+
 Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
                                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<size_t>(node_item_->num_outputs),
@@ -593,8 +617,8 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
-                                          const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::PrepareCopyInputs(const TaskContext &context,
+                                            const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   std::vector<uint64_t> copy_input_release_flag;
   std::vector<uint64_t> copy_input_data_size;
   std::vector<uint64_t> copy_input_src;
@@ -635,8 +659,8 @@ Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
-                                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::UpdateShapeByHbmBuffer(TaskContext &context,
+                                                 const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
   GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<size_t>(node_item_->num_outputs),
                          INTERNAL_ERROR,
                          "Node[%s] has %d outputs but out shape is %zu",
@@ -667,7 +691,7 @@ Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::UpdateShapeAndDataByResultSummary(TaskContext &context) {
+Status AicpuNodeTaskBase::UpdateShapeAndDataByResultSummary(TaskContext &context) {
   GELOGD("Node[%s] update shape and data by result summary begin.", node_name_.c_str());
 
   std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
@@ -762,7 +786,7 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) {
   return SUCCESS;
 }
 
-Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
+Status AicpuNodeTaskBase::TaskCallback(TaskContext &context) {
   GELOGD("Node[%s] task callback start. is_dynamic=%s, unknown_type=%d.",
          node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
   Status callback_ret = SUCCESS;
@@ -779,14 +803,115 @@ Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
   return callback_ret;
 }
 
+Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
+  if (node_item_->num_outputs == 0) {
+    GELOGD("Node[%s] type[%s] has no output, no need to set mem_copy task.",
+           node_name_.c_str(), node_item_->node_type.c_str());
+    return SUCCESS;
+  }
+
+  GELOGD("Start to set memcpy task for node[%s].", node_name_.c_str());
+  const domi::KernelDef &kernel_def = task_def.kernel();
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  if (memcpy_args.size() != memcpy_args_size_) {
+    REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                       memcpy_args.size(), memcpy_args_size_);
+    GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+           memcpy_args.size(), memcpy_args_size_);
+    return FAILED;
+  }
+
+  if (memcpy_args_size_ < sizeof(aicpu::AicpuParamHead)) {
+    REPORT_INNER_ERROR("E19999", "Task def args_size=%u is less than aicpu param head len=%zu.",
+                       memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    GELOGE(FAILED, "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+           memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    return FAILED;
+  }
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  if (memcpy_args_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                       memcpy_args_size_);
+    GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+           memcpy_args_size_);
+    return FAILED;
+  }
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  if (sec_ret != EOK) {
+    REPORT_INNER_ERROR("E19999",
+                       "memcpy_s args failed for Node[MemCopy], ret: %d", sec_ret);
+    GELOGE(INTERNAL_ERROR,
+           "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+    return sec_ret;
+  }
+
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                         &copy_io_addr_[0], sizeof(uint64_t) * memcpy_io_num);
+  if (cpy_ret != EOK) {
+    REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                       "ret=%d, args_size=%u, io nums=%u.",
+                       cpy_ret, memcpy_args_size_, memcpy_io_num);
+    GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+           "ret=%d, args_size=%u, io nums=%u.",
+           cpy_ret, memcpy_args_size_, memcpy_io_num);
+    return INTERNAL_ERROR;
+  }
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}
+
+Status AicpuNodeTask::InitForDependComputeTask() {
+  if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
+    GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
+           node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(node_item_->num_outputs);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (auto i = 0; i < node_item_->num_outputs; ++i) {
+    GE_CHK_STATUS_RET(AllocTensorBuffer(result_summary_size, output_summary_[i]),
+                      "[Alloc][TensorBuffer] failed for Node[%s] to copy result summary info, size=%zu.",
+                      node_name_.c_str(), result_summary_size);
+  }
+  output_summary_host_.resize(node_item_->num_outputs);
+
+  // init for mem copy task
+  // copy task need copy output_data and output_shape, max len is 2 * output_num
+  const size_t copy_input_buf_len = node_item_->num_outputs * 2 * sizeof(uint64_t);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_release_flag_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input release_flag, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_data_size_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input data_size, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_src_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input src, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_->GetData()));
+  return SUCCESS;
+}
+
 Status AicpuNodeTask::Init(const HybridModel &model) {
   auto node_name = node_name_;
   GELOGD("Node[%s] init start.", node_name.c_str());
 
-  GE_CHK_BOOL_RET_STATUS(unknown_type_ != DEPEND_COMPUTE, FAILED,
-                         "[Check][Type]Node[%s] unknown type[%d] is depend compute, it's not supported now.",
-                         node_name.c_str(), unknown_type_);
-
   GE_CHK_BOOL_RET_STATUS(task_def_.has_kernel(), FAILED,
                          "[Check][task_def_]Node[%s] task def does not has kernel.", node_name.c_str());
   auto &kernel_def = task_def_.kernel();
@@ -877,7 +1002,9 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
   GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_);
   GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id),
                     "[Init][ExtInfo] failed for Node[%s].", node_name.c_str());
-
+  GE_CHK_STATUS_RET(InitForDependComputeTask(),
+                    "[Init][DependComputeTask] failed for Node[%s].",
+                    node_name_.c_str());
   if (ext_info_addr_dev_ == nullptr) {
     aicpu_param_head->extInfoLength = 0;
     aicpu_param_head->extInfoAddr = 0;
@@ -885,7 +1012,11 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
     aicpu_param_head->extInfoLength = ext_info_addr_dev_->GetSize();
     aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_dev_->GetData());
   }
-
+  auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
+  }
   GELOGD("Node[%s] init end.", node_name.c_str());
   return SUCCESS;
 }
@@ -900,21 +1031,36 @@ Status AicpuNodeTask::UpdateIoAddr(TaskContext &context) {
     GELOGD("Node[%s] input[%d] = %p, size = %zu", node_name_.c_str(), i, inputData->GetData(), inputData->GetSize());
%zu", node_name_.c_str(), i, inputData->GetData(), inputData->GetSize()); io_addrs.emplace_back(reinterpret_cast(inputData->GetData())); } + // known shape or not depend compute + if (!node_item_->is_dynamic || unknown_type_ != DEPEND_COMPUTE) { + // unknown type 4 do this in call back. + GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs()); + for (auto j = 0; j < node_item_->num_outputs; ++j) { + auto outputData = context.GetOutput(j); + GE_CHECK_NOTNULL(outputData); + GELOGD("Node[%s] output[%d] addr = %p, size = %zu", + node_name_.c_str(), j, outputData->GetData(), outputData->GetSize()); + io_addrs.emplace_back(reinterpret_cast(outputData->GetData())); + } + } else { + // unknown type 4 use result summary update ioaddr. + GELOGD("Node[%s] is depend compute node, use result summary as out addr.", node_name_.c_str()); + GE_CHK_BOOL_RET_STATUS(output_summary_.size() == static_cast(node_item_->num_outputs), + INTERNAL_ERROR, + "[Check][Size]Node[%s] has %d output but %zu output summary not equal.", + node_name_.c_str(), node_item_->num_outputs, output_summary_.size()); - GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs()); - for (auto j = 0; j < node_item_->num_outputs; ++j) { - auto outputData = context.GetOutput(j); - GE_CHECK_NOTNULL(outputData); - GELOGD("Node[%s] output[%d] addr = %p, size = %zu", node_name_.c_str(), j, - outputData->GetData(), outputData->GetSize()); - io_addrs.emplace_back(reinterpret_cast(outputData->GetData())); + for (auto j = 0; j < node_item_->num_outputs; ++j) { + void *summary_addr = output_summary_[j]->GetData(); + io_addrs.emplace_back(reinterpret_cast(summary_addr)); + } } auto io_addr = args_.get() + sizeof(aicpu::AicpuParamHead); // if has input and output, need copy to ioaddr int cpy_ret = memcpy_s(io_addr, args_size_ - sizeof(aicpu::AicpuParamHead), &io_addrs[0], sizeof(uint64_t) * io_addrs.size()); - GE_IF_BOOL_EXEC(cpy_ret != 0, + GE_IF_BOOL_EXEC(cpy_ret != EOK, REPORT_INNER_ERROR("E19999", "Node[%s] memcpy io addr to AicpuParamHead failed," "ret=%d, args_size=%u, io nums=%zu.", node_name_.c_str(), cpy_ret, args_size_, io_addrs.size()); @@ -951,23 +1097,6 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) { return SUCCESS; } -Status AicpuNodeTask::TaskCallback(TaskContext &context) { - GELOGD("Node[%s] task callback start, is_dynamic = %s, unknown_type=%d.", - node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_); - Status callback_ret = SUCCESS; - - // check need update shape, call update shape. 
-  if (node_item_->is_dynamic && unknown_type_ == DEPEND_SHAPE_RANGE) {
-    // check result
-    callback_ret = UpdateOutputShapeFromExtInfo(context);
-  } else {
-    GELOGD("Node[%s] unknown shape type is %d no need update output shape.",
-           node_name_.c_str(), unknown_type_);
-  }
-  GELOGD("Node[%s] task callback end.", node_name_.c_str());
-  return callback_ret;
-}
-
 Status AiCpuNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
   // malloc HBM memory at Init, here just update them
   RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCpuNodeExecutorPrepareTask] Start");
diff --git a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
index 3911e090..304821aa 100644
--- a/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
+++ b/ge/hybrid/node_executor/aicpu/aicpu_node_executor.h
@@ -55,11 +55,33 @@ class AicpuNodeTaskBase : public NodeTask {
 
   virtual Status LaunchTask(TaskContext &context) = 0;
 
-  virtual Status TaskCallback(TaskContext &context) = 0;
+  virtual Status InitForDependComputeTask() = 0;
+
+  Status TaskCallback(TaskContext &context);
+
+  virtual Status UpdateShapeAndDataByResultSummary(TaskContext &context);
 
   virtual Status UpdateIoAddr(TaskContext &context) = 0;
 
   static Status AllocTensorBuffer(size_t size, std::unique_ptr<TensorBuffer> &tensor_buffer);
+
+  virtual Status CopyDataToHbm(TaskContext &context,
+                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) = 0;
+
+  ///
+  /// read result summary and prepare copy task memory.
+  /// @param context task context
+  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
+  /// @return SUCCESS:success other:failed
+  ///
+  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status UpdateShapeByHbmBuffer(TaskContext &context,
+                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status PrepareCopyInputs(const TaskContext &context,
+                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
 
   Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
   Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);
@@ -83,6 +105,13 @@ class AicpuNodeTaskBase : public NodeTask {
   // ext info addr, device mem
   std::unique_ptr<TensorBuffer> ext_info_addr_dev_;
+  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
+  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
+
+  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
   // for blocking aicpu op
   bool is_blocking_aicpu_op_ = false;
   rtEvent_t rt_event_ = nullptr;
@@ -101,33 +130,14 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {
 
   Status LaunchTask(TaskContext &context) override;
 
-  Status TaskCallback(TaskContext &context) override;
-
   Status UpdateIoAddr(TaskContext &context) override;
 
- private:
-  Status SetMemCopyTask(const domi::TaskDef &task_def);
+  Status InitForDependComputeTask() override;
 
-  Status InitForDependComputeTask();
-
-  Status UpdateShapeAndDataByResultSummary(TaskContext &context);
-
-  ///
-  /// read result summary and prepare copy task memory.
-  /// @param context task context
-  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
-  /// @return SUCCESS:success other:failed
-  ///
-  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
 
   Status CopyDataToHbm(TaskContext &context,
-                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status UpdateShapeByHbmBuffer(TaskContext &context,
-                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status PrepareCopyInputs(const TaskContext &context,
-                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;
+ private:
+  Status SetMemCopyTask(const domi::TaskDef &task_def);
 
   static Status EnsureSessionCreated(uint64_t session_id);
   static uint64_t GetStepIdAddr(const HybridModel &model);
@@ -142,16 +152,7 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {
 
   // just used for depend DEPEND_COMPUTE op
   std::unique_ptr<TensorBuffer> copy_task_args_buf_;
-
-  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
-  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
-
   std::unique_ptr<TensorBuffer> copy_ioaddr_dev_;
-
-  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
 
   bool need_sync_ = false;
 
   std::unique_ptr<TensorBuffer> copy_workspace_buf_;
@@ -170,14 +171,28 @@ class AicpuNodeTask : public AicpuNodeTaskBase {
 
   Status LaunchTask(TaskContext &context) override;
 
-  Status TaskCallback(TaskContext &context) override;
+  Status CopyDataToHbm(TaskContext &context,
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;
 
   Status UpdateIoAddr(TaskContext &context) override;
 
+  Status InitForDependComputeTask() override;
+private:
+  Status SetMemCopyTask(const domi::TaskDef &task_def);
+
 protected:
   // host mem
   std::unique_ptr<uint8_t[]> args_;
+  // host memcpy mem
+  std::unique_ptr<uint8_t[]> memcpy_args_;
+
+  std::string memcpy_so_name_;
+
+  std::string memcpy_kernel_name_;
+  // args size
+  uint32_t memcpy_args_size_ = 0;
+  std::vector<uint64_t> copy_io_addr_;
   // args size
   uint32_t args_size_ = 0;
 };
diff --git a/ge/single_op/single_op_model.cc b/ge/single_op/single_op_model.cc
index ca07d2ae..08778c09 100755
--- a/ge/single_op/single_op_model.cc
+++ b/ge/single_op/single_op_model.cc
@@ -333,7 +333,7 @@ Status SingleOpModel::BuildTaskList(StreamResource *stream_resource, SingleOp &s
       single_op.tasks_.emplace_back(tbe_task);
     } else if (kernel_type == ccKernelType::AI_CPU || kernel_type == ccKernelType::CUST_AI_CPU) {
       GELOGD("Building AICPU_CC task");
-      OpTask *task = nullptr;
+      AiCpuCCTask *task = nullptr;
       uint64_t singleop_kernel_id = aicpu_kernel_id++;
       GELOGI("Build singleOp CCTask, kernel_id = %lu", singleop_kernel_id);
       GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, singleop_kernel_id));
@@ -489,7 +489,7 @@ Status SingleOpModel::BuildKernelExTask(const domi::KernelExDef &kernel_def, AiC
   return SUCCESS;
 }
 
-Status SingleOpModel::BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id) {
+Status SingleOpModel::BuildCpuKernelTask(const domi::KernelDef &kernel_def, AiCpuCCTask **task, uint64_t kernel_id) {
   const auto &context = kernel_def.context();
   auto iter = op_list_.find(context.op_index());
   if (iter == op_list_.end()) {
@@ -611,10 +611,19 @@ Status SingleOpModel::BuildTaskListForDynamicOp(StreamResource *stream_resource,
   } else if (lib_name == kEngineNameAiCpu) {
     const auto &task_def = task_defs[0];
     GELOGD("Building AICPU_CC task");
-    OpTask *task = nullptr;
+    AiCpuCCTask *task = nullptr;
     uint64_t dynamic_singleop_kernel_id = aicpu_kernel_id++;
     GELOGI("Build dynamic singleOp CCTask, kernel_id = %lu", dynamic_singleop_kernel_id);
     GE_CHK_STATUS_RET_NOLOG(BuildCpuKernelTask(task_def.kernel(), &task, dynamic_singleop_kernel_id));
+    if (task->GetUnknownType() == DEPEND_COMPUTE) {
+      if (task_defs.size() < kNumTaskWithMemCpyTask) {
+        GELOGE(ACL_ERROR_GE_PARAM_INVALID, "[Check][Task]The copy task of the fourth operator was not found.");
+        REPORT_INNER_ERROR("E19999", "The copy task of the fourth operator was not found.");
+        return ACL_ERROR_GE_PARAM_INVALID;
+      }
+      const TaskDef &copy_task_def = task_defs[1];
+      GE_CHK_STATUS_RET_NOLOG(task->SetMemCopyTask(copy_task_def.kernel()));
+    }
     task->SetModelArgs(model_name_, model_id_);
     single_op.op_task_.reset(task);
   } else if (lib_name == kEngineNameAiCpuTf) {
diff --git a/ge/single_op/single_op_model.h b/ge/single_op/single_op_model.h
index b1cd161c..22ee11b2 100755
--- a/ge/single_op/single_op_model.h
+++ b/ge/single_op/single_op_model.h
@@ -71,7 +71,7 @@ class SingleOpModel {
   Status BuildKernelTask(const domi::TaskDef &task_def, TbeOpTask **task);
   Status BuildAtomicTask(const domi::TaskDef &task_def, AtomicAddrCleanOpTask **task);
   Status BuildKernelExTask(const domi::KernelExDef &kernel_def, AiCpuTask **task, uint64_t kernel_id);
-  Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, OpTask **task, uint64_t kernel_id);
+  Status BuildCpuKernelTask(const domi::KernelDef &kernel_def, AiCpuCCTask **task, uint64_t kernel_id);
   static void ParseOpModelParams(ModelHelper &model_helper, SingleOpModelParam &param);
   void ParseArgTable(OpTask *task, SingleOp &op);
 
diff --git a/ge/single_op/task/aicpu_kernel_task_builder.cc b/ge/single_op/task/aicpu_kernel_task_builder.cc
index 2f0856bf..3099d8b6 100755
--- a/ge/single_op/task/aicpu_kernel_task_builder.cc
+++ b/ge/single_op/task/aicpu_kernel_task_builder.cc
@@ -102,11 +102,8 @@ Status AiCpuCCTaskBuilder::BuildTask(AiCpuCCTask &task, uint64_t kernel_id, cons
     return ret;
   }
   GE_CHK_STATUS_RET(task.SetInputConst(), "[Set][InputConst] failed.");
+  GE_CHK_STATUS_RET(task.InitForSummaryAndCopy(), "[Init][SummaryAndCopy] failed.");
 
-  if (task.GetUnknownType() == DEPEND_COMPUTE) {
-    GELOGE(FAILED, "[Get][UnknownType] is depend compute, it's not supported now.");
-    return FAILED;
-  }
   auto aicpu_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(task.args_.get());
   if (task.ext_info_addr_dev_ != nullptr) {
     aicpu_param_head->extInfoLength = kernel_ext_info.size();
diff --git a/ge/single_op/task/op_task.cc b/ge/single_op/task/op_task.cc
index 83cb0529..f41a59aa 100755
--- a/ge/single_op/task/op_task.cc
+++ b/ge/single_op/task/op_task.cc
@@ -567,6 +567,16 @@ AiCpuBaseTask::~AiCpuBaseTask() {
   if (rt_event_ != nullptr) {
     (void)rtEventDestroy(rt_event_);
   }
+  FreeHbm(copy_input_release_flag_dev_);
+  FreeHbm(copy_input_data_size_dev_);
+  FreeHbm(copy_input_src_dev_);
+  FreeHbm(copy_input_dst_dev_);
+  for (auto summary : output_summary_) {
+    FreeHbm(summary);
+  }
+  for (auto out_shape : out_shape_hbm_) {
+    FreeHbm(out_shape);
+  }
 }
 
 Status AiCpuBaseTask::UpdateEventIdForBlockingAicpuOp() {
@@ -878,17 +888,7 @@ AiCpuTask::~AiCpuTask() {
   FreeHbm(workspace_addr_);
   FreeHbm(copy_workspace_buf_);
   FreeHbm(copy_ioaddr_dev_);
-  FreeHbm(copy_input_release_flag_dev_);
-  FreeHbm(copy_input_data_size_dev_);
-  FreeHbm(copy_input_src_dev_);
-  FreeHbm(copy_input_dst_dev_);
   FreeHbm(copy_task_args_buf_);
-  for (auto summary : output_summary_) {
-    FreeHbm(summary);
-  }
-  for (auto out_shape : out_shape_hbm_) {
-    FreeHbm(out_shape);
-  }
 }
 
 Status AiCpuTask::LaunchKernel(rtStream_t stream) {
@@ -926,7 +926,7 @@ Status AiCpuTask::LaunchKernel(rtStream_t stream) {
   return SUCCESS;
 }
 
-Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
+Status AiCpuBaseTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
   std::vector<uint64_t> copy_input_release_flag;
   std::vector<uint64_t> copy_input_data_size;
   std::vector<uint64_t> copy_input_src;
@@ -955,7 +955,6 @@ Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
   }
 
   const size_t copy_input_buf_len = num_outputs_ * kCopyNum * sizeof(uint64_t);
-
   GE_CHK_RT_RET(rtMemcpy(copy_input_release_flag_dev_, copy_input_buf_len,
                          copy_input_release_flag.data(), copy_input_buf_len, RT_MEMCPY_HOST_TO_DEVICE));
   GE_CHK_RT_RET(rtMemcpy(copy_input_data_size_dev_, copy_input_buf_len,
@@ -967,7 +966,7 @@ Status AiCpuTask::PrepareCopyInputs(vector<DataBuffer> &outputs) {
   return SUCCESS;
 }
 
-Status AiCpuTask::ReadResultSummaryAndPrepareMemory() {
+Status AiCpuBaseTask::ReadResultSummaryAndPrepareMemory() {
   for (size_t i = 0; i < num_outputs_; ++i) {
     auto &result_summary = output_summary_host_[i];
 
@@ -984,6 +983,19 @@ Status AiCpuTask::ReadResultSummaryAndPrepareMemory() {
   return SUCCESS;
 }
 
+Status AiCpuCCTask::CopyDataToHbm(vector<DataBuffer> &outputs,
+                                  rtStream_t stream) {
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(outputs));
+
+  auto ret = rtCpuKernelLaunchWithFlag(static_cast<const void *>(memcpy_so_name_.data()),
+                                       static_cast<const void *>(memcpy_kernel_name_.data()),
+                                       block_dim_, memcpy_args_.get(), memcpy_args_size_,
+                                       nullptr, stream, RT_KERNEL_DEFAULT);
+  GE_CHK_RT_RET(ret);
+  GE_CHK_RT_RET(rtStreamSynchronize(stream));
+  return SUCCESS;
+}
+
 Status AiCpuTask::CopyDataToHbm(vector<DataBuffer> &outputs,
                                 rtStream_t stream) {
   GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(outputs));
@@ -994,7 +1006,7 @@ Status AiCpuTask::CopyDataToHbm(vector<DataBuffer> &outputs,
   return SUCCESS;
 }
 
-Status AiCpuTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
+Status AiCpuBaseTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
   for (size_t i = 0; i < num_outputs_; ++i) {
     const auto &result_summary = output_summary_host_[i];
     std::vector<int64_t> shape_dims;
@@ -1023,9 +1035,9 @@ Status AiCpuTask::UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc) {
 }
 
-Status AiCpuTask::UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
-                                                    vector<DataBuffer> &outputs,
-                                                    rtStream_t stream) {
+Status AiCpuBaseTask::UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
+                                                        vector<DataBuffer> &outputs,
+                                                        rtStream_t stream) {
   if (num_outputs_ == 0) {
     GELOGI("Output num is 0, there is no need to update the output and size.");
     return SUCCESS;
   }
@@ -1151,6 +1163,119 @@ Status AiCpuTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
   return SUCCESS;
 }
 
+Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
+                                 const std::vector<DataBuffer> &input_buffers,
+                                 std::vector<GeTensorDesc> &output_desc,
+                                 std::vector<DataBuffer> &output_buffers,
+                                 rtStream_t stream) {
+  GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream));
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    std::vector<DataBuffer> summary_buffers;
+    for (size_t i = 0; i < num_outputs_; ++i) {
+      summary_buffers.emplace_back(output_summary_[i], sizeof(aicpu::FWKAdapter::ResultSummary), false);
+    }
+    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, summary_buffers));
+  } else {
+    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
+  }
+
+  GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream));
+  if (unknown_type_ == DEPEND_SHAPE_RANGE) {
+    GE_CHK_RT_RET(rtStreamSynchronize(stream));
+    GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc));
+  } else if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_RT_RET(rtStreamSynchronize(stream));
+    GE_CHK_STATUS_RET_NOLOG(UpdateShapeAndDataByResultSummary(output_desc, output_buffers, stream));
+  }
+
+  return SUCCESS;
+}
+
+Status AiCpuCCTask::InitForSummaryAndCopy() {
+  if (unknown_type_ != DEPEND_COMPUTE || num_outputs_ == 0) {
+    GELOGI("Unknown_type is %d, output num is %zu.", unknown_type_, num_outputs_);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(num_outputs_);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (size_t i = 0; i < num_outputs_; ++i) {
+    GE_CHK_RT_RET(rtMalloc(&output_summary_[i], result_summary_size, RT_MEMORY_HBM));
+  }
+  output_summary_host_.resize(num_outputs_);
+
+  const size_t copy_input_buf_len = num_outputs_ * kCopyNum * sizeof(uint64_t);
+
+  GE_CHK_RT_RET(rtMalloc(&copy_input_release_flag_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_data_size_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_src_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+  GE_CHK_RT_RET(rtMalloc(&copy_input_dst_dev_, copy_input_buf_len, RT_MEMORY_HBM));
+
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_));
+  return SUCCESS;
+}
+
+Status AiCpuCCTask::SetMemCopyTask(const domi::KernelDef &kernel_def) {
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  if (memcpy_args.size() != memcpy_args_size_) {
+    REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                       memcpy_args.size(), memcpy_args_size_);
+    GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+           memcpy_args.size(), memcpy_args_size_);
+    return FAILED;
+  }
+  if (memcpy_args_size_ < sizeof(aicpu::AicpuParamHead)) {
+    REPORT_INNER_ERROR("E19999",
+                       "Task def args_size=%u is less than aicpu param head len=%zu.",
+                       memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    GELOGE(FAILED,
+           "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+           memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    return FAILED;
+  }
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  if (memcpy_args_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                       memcpy_args_size_);
+    GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+           memcpy_args_size_);
+    return FAILED;
+  }
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  if (sec_ret != EOK) {
+    REPORT_INNER_ERROR("E19999",
+                       "memcpy_s args failed for Node[MemCopy], ret: %d", sec_ret);
+    GELOGE(INTERNAL_ERROR,
+           "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+    return sec_ret;
+  }
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                         &copy_io_addr_[0], sizeof(uint64_t) * memcpy_io_num);
+  if (cpy_ret != 0) {
+    REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+                       "ret=%d, args_size=%u, io nums=%u.",
+                       cpy_ret, memcpy_args_size_, memcpy_io_num);
+    GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+           "ret=%d, args_size=%u, io nums=%u.",
+           cpy_ret, memcpy_args_size_, memcpy_io_num);
+    return INTERNAL_ERROR;
+  }
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}
+
 Status AiCpuBaseTask::UpdateArgTable(const SingleOpModelParam &param) {
   // aicpu do not have workspace, for now
   return DoUpdateArgTable(param, false);
@@ -1209,22 +1334,6 @@ Status AiCpuCCTask::LaunchKernel(rtStream_t stream) {
   return SUCCESS;
 }
 
-Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
-                                 const std::vector<DataBuffer> &input_buffers,
-                                 std::vector<GeTensorDesc> &output_desc,
-                                 std::vector<DataBuffer> &output_buffers,
-                                 rtStream_t stream) {
-  GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream));
-  GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
-  GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream));
-  if (unknown_type_ == DEPEND_SHAPE_RANGE) {
-    GE_CHK_RT_RET(rtStreamSynchronize(stream));
-    GE_CHK_STATUS_RET_NOLOG(UpdateOutputShape(output_desc));
-  }
-
-  return SUCCESS;
-}
-
 void AiCpuCCTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
   arg_base = io_addr_;
   arg_count = io_addr_num_;
diff --git a/ge/single_op/task/op_task.h b/ge/single_op/task/op_task.h
index adf51dba..836b2046 100644
--- a/ge/single_op/task/op_task.h
+++ b/ge/single_op/task/op_task.h
@@ -77,12 +77,12 @@ class OpTask {
 class TbeOpTask : public OpTask {
  public:
   ~TbeOpTask() override;
-  Status LaunchKernel(rtStream_t stream) override;
   Status LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                       const std::vector<DataBuffer> &input_buffers,
                       std::vector<GeTensorDesc> &output_desc,
                       std::vector<DataBuffer> &output_buffers,
                       rtStream_t stream) override;
+  Status LaunchKernel(rtStream_t stream) override;
   void GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) override;
   void SetSmDesc(void *sm_desc);
   void SetStubFunc(const std::string &name, const void *stub_func);
@@ -167,7 +167,6 @@ class AiCpuBaseTask : public OpTask {
   UnknowShapeOpType GetUnknownType() const { return unknown_type_; }
   Status UpdateArgTable(const SingleOpModelParam &param) override;
   const std::string &GetTaskType() const override;
-
 protected:
   Status UpdateIoAddr(const std::vector<DataBuffer> &inputs, const std::vector<DataBuffer> &outputs);
   Status SetInputConst();
@@ -178,6 +177,16 @@
                        rtStream_t stream);
   Status UpdateOutputShape(vector<GeTensorDesc> &output_desc);
   Status UpdateShapeToOutputDesc(const GeShape &shape_new, GeTensorDesc &output_desc);
+  Status UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
+                                           vector<DataBuffer> &outputs,
+                                           rtStream_t stream);
+  Status ReadResultSummaryAndPrepareMemory();
+
+  Status PrepareCopyInputs(vector<DataBuffer> &outputs);
+
+  Status UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc);
+
+  virtual Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream) = 0;
   // for blocking aicpu op
   Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
   Status UpdateEventIdForBlockingAicpuOp();
@@ -193,6 +202,15 @@
   // for blocking aicpu op
   bool is_blocking_aicpu_op_ = false;
   rtEvent_t rt_event_ = nullptr;
+  std::vector<void *> output_summary_;
+  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
+
+  void *copy_input_release_flag_dev_ = nullptr;
+  void *copy_input_data_size_dev_ = nullptr;
+  void *copy_input_src_dev_ = nullptr;
+  void *copy_input_dst_dev_ = nullptr;
+
+  vector<void *> out_shape_hbm_;
 };
 
 class AiCpuTask : public AiCpuBaseTask {
@@ -202,7 +220,6 @@ class AiCpuTask : public AiCpuBaseTask {
 
   Status LaunchKernel(rtStream_t stream) override;
   void GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) override;
-
   Status LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                       const std::vector<DataBuffer> &input_buffers,
                       std::vector<GeTensorDesc> &output_desc,
@@ -213,15 +230,7 @@ class AiCpuTask : public AiCpuBaseTask {
  private:
   // for copy task.
   Status InitForSummaryAndCopy();
-  Status UpdateShapeAndDataByResultSummary(vector<GeTensorDesc> &output_desc,
-                                           vector<DataBuffer> &outputs,
-                                           rtStream_t stream);
-  Status ReadResultSummaryAndPrepareMemory();
-
-  Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream);
-  Status PrepareCopyInputs(vector<DataBuffer> &outputs);
-
-  Status UpdateShapeByHbmBuffer(vector<GeTensorDesc> &output_desc);
+  Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream) override;
 
   friend class AiCpuTaskBuilder;
   void *workspace_addr_ = nullptr;
@@ -241,17 +250,8 @@ class AiCpuTask : public AiCpuBaseTask {
   void *copy_task_args_buf_ = nullptr;
   void *copy_workspace_buf_ = nullptr;
 
-  std::vector<void *> output_summary_;
-  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
-
   void *copy_ioaddr_dev_ = nullptr;
-
-  void *copy_input_release_flag_dev_ = nullptr;
-  void *copy_input_data_size_dev_ = nullptr;
-  void *copy_input_src_dev_ = nullptr;
-  void *copy_input_dst_dev_ = nullptr;
-
-  vector<void *> out_shape_hbm_;
   uint64_t kernel_id_ = 0;
 };
 
@@ -261,8 +261,13 @@ class AiCpuCCTask : public AiCpuBaseTask {
   ~AiCpuCCTask() override;
   AiCpuCCTask(const AiCpuCCTask &) = delete;
   AiCpuCCTask &operator=(const AiCpuCCTask &) = delete;
-
+  Status SetMemCopyTask(const domi::KernelDef &kernel_def);
   Status LaunchKernel(rtStream_t stream) override;
+  Status LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
+                      const std::vector<DataBuffer> &input_buffers,
+                      std::vector<GeTensorDesc> &output_desc,
+                      std::vector<DataBuffer> &output_buffers,
+                      rtStream_t stream) override;
   void GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) override;
   const void *GetArgs() const;
   void SetKernelArgs(std::unique_ptr<uint8_t[]> args, size_t arg_size);
@@ -270,13 +275,9 @@ class AiCpuCCTask : public AiCpuBaseTask {
   void SetkernelName(const std::string &kernel_Name);
   void SetIoAddr(uintptr_t *io_addr);
   size_t GetArgSize() const;
-
-  Status LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
-                      const std::vector<DataBuffer> &input_buffers,
-                      std::vector<GeTensorDesc> &output_desc,
-                      std::vector<DataBuffer> &output_buffers,
-                      rtStream_t stream) override;
-
+ private:
+  Status InitForSummaryAndCopy();
+  Status CopyDataToHbm(vector<DataBuffer> &outputs, rtStream_t stream) override;
 private:
   friend class AiCpuCCTaskBuilder;
   std::string so_name_;
@@ -290,6 +291,13 @@ private:
   uint32_t dump_flag_ = RT_KERNEL_DEFAULT;
   std::string op_type_;
   uint64_t kernel_id_ = 0;
+  // host memcpy mem
+  std::unique_ptr<uint8_t[]> memcpy_args_;
+  std::string memcpy_so_name_;
+  std::string memcpy_kernel_name_;
+  std::vector<uint64_t> copy_io_addr_;
+  // args size
+  uint32_t memcpy_args_size_ = 0;
 };
 
 class MemcpyAsyncTask : public OpTask {
diff --git a/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
index 034b3f47..2bf5b8ac 100644
--- a/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
+++ b/tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc
@@ -152,20 +152,147 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_tf_node_task) {
 
   domi::TaskDef task_def2;
   task_def2.set_type(RT_MODEL_TASK_ALL_KERNEL);
-  task_def2.mutable_kernel()->set_args(reinterpret_cast<const char *>(&args), args.head.length);
-  task_def2.mutable_kernel()->set_args_size(args.head.length);
+  domi::KernelDef *kernel_def = task_def2.mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  AicpuExtInfo aicpu_ext_info2;
+  aicpu_ext_info2.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
+  aicpu_ext_info2.infoLen = sizeof(int32_t);
+  memcpy_s(aicpu_ext_info2.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
+  char *ext_mem2 = (char*)malloc(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  memcpy_s(ext_mem2, sizeof(AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info2, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info(ext_mem2, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info_size(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def2, task_def2});
+
+  AicpuNodeTask aicpu_node_task(node_item, task_def2);
+  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
+  ASSERT_EQ(aicpu_node_task.UpdateIoAddr(*node_state->GetTaskContext()), SUCCESS);
+  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
+  node_item->is_dynamic = false;
+  ASSERT_EQ(aicpu_node_task.UpdateIoAddr(*node_state->GetTaskContext()), SUCCESS);
+  //kernel_ex_def->set_allocated_kernel_ext_info(nullptr);
+  free(ext_mem);
+  free(ext_mem2);
+}
-  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def2});
+TEST_F(UtestAicpuNodeExecutor, aicpu_memcopy_task) {
+  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
+  GeModelPtr ge_sub_model = std::make_shared<GeModel>();
+  GeRootModelPtr ge_root_model = std::make_shared<GeRootModel>(graph);
+  ge_root_model->SetModelName("test_name");
+  ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model);
+  HybridModel hybrid_model(ge_root_model);
+  NodePtr node = CreateNode(graph, "frameworkop", FRAMEWORK_OP_TYPE, 4, 2);
+  std::unique_ptr<NodeItem> new_node;
+  ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS);
+  NodeItem *node_item = new_node.get();
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 6;
+  domi::TaskDef task_def;
+  task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  node_item->num_outputs = 0;
   AicpuNodeTask aicpu_node_task(node_item, task_def);
-  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
-  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
+  ASSERT_EQ(aicpu_node_task.SetMemCopyTask(task_def), SUCCESS);
+  node_item->num_outputs = 1;
+  AicpuNodeTask aicpu_node_task2(node_item, task_def);
+  ASSERT_EQ(aicpu_node_task2.SetMemCopyTask(task_def), INTERNAL_ERROR);
+  kernel_def->set_args_size(0);
+  ASSERT_EQ(aicpu_node_task2.SetMemCopyTask(task_def), FAILED);
+  const char *args2 = "123";
+  kernel_def->set_args(reinterpret_cast<const char *>(&args2), 3);
+  kernel_def->set_args_size(3);
+  ASSERT_EQ(aicpu_node_task2.SetMemCopyTask(task_def), FAILED);
+}
+TEST_F(UtestAicpuNodeExecutor, aicpu_copy_data_to_hbm) {
+  ComputeGraphPtr graph = std::make_shared<ComputeGraph>("test");
+  GeModelPtr ge_sub_model = std::make_shared<GeModel>();
+  GeRootModelPtr ge_root_model = std::make_shared<GeRootModel>(graph);
+  ge_root_model->SetModelName("test_name");
+  ge_root_model->SetSubgraphInstanceNameToModel("sub", ge_sub_model);
+  HybridModel hybrid_model(ge_root_model);
-  //kernel_ex_def->set_allocated_kernel_ext_info(nullptr);
+  NodePtr node = CreateNode(graph, "frameworkop", FRAMEWORK_OP_TYPE, 4, 2);
-  free(ext_mem);
+  std::unique_ptr<NodeItem> new_node;
+  ASSERT_EQ(NodeItem::Create(node, new_node), SUCCESS);
+  NodeItem *node_item = new_node.get();
+  hybrid_model.node_items_[node] = std::move(new_node);
+  node_item->input_start = 0;
+  node_item->output_start = 0;
+  node_item->is_dynamic = true;
+  node_item->shape_inference_type = DEPEND_COMPUTE;
+  node_item->num_outputs = 2;
+  GraphItem graph_item;
+  graph_item.node_items_.emplace_back(node_item);
+  graph_item.total_inputs_ = 4;
+  graph_item.total_outputs_ = 2;
+  GraphExecutionContext graph_context;
+  SubgraphContext subgraph_context(&graph_item, &graph_context);
+  ASSERT_EQ(subgraph_context.Init(), SUCCESS);
+  graph_context.callback_manager = std::unique_ptr<CallbackManager>(new CallbackManager());
+
+  auto node_state = subgraph_context.GetOrCreateNodeState(node_item);
+  ASSERT_NE(node_state, nullptr);
+
+  for (int i = 0; i < 4; ++i) {
+    uint64_t value_0 = 512;
+    TensorValue in_tensor0(&value_0, sizeof(value_0));
+    subgraph_context.SetInput(*node_item, 0, in_tensor0);
+  }
+
+  uint64_t value_0 = 512;
+  TensorValue out_tensor0(&value_0, sizeof(value_0));
+  subgraph_context.SetOutput(*node_item, 0, out_tensor0);
+
+  uint64_t value_1 = 512;
+  TensorValue out_tensor1(&value_1, sizeof(value_1));
+  subgraph_context.SetOutput(*node_item, 1, out_tensor1);
+
+  // task
+  domi::TaskDef task_def;
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 6;
+  task_def.set_type(RT_MODEL_TASK_ALL_KERNEL);
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  AicpuExtInfo aicpu_ext_info;
+  aicpu_ext_info.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
+  aicpu_ext_info.infoLen = sizeof(int32_t);
+  int32_t type = node_item->shape_inference_type;
+  memcpy_s(aicpu_ext_info.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
+  char *ext_mem = (char*)malloc(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  memcpy_s(ext_mem, sizeof(AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info(ext_mem, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info_size(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def, task_def});
+
+  AicpuNodeTask aicpu_node_task(node_item, task_def);
+  std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
+  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
+  for (int i = 0; i < node_item->num_outputs; i++) {
+    auto &summary = aicpu_node_task.output_summary_host_[i];
+    summary.shape_data_ptr = 0;
+    summary.shape_data_size = 1;
+    summary.raw_data_ptr = 0;
+    summary.raw_data_size = 1;
+  }
+  for (int i = 0; i < node_item->num_outputs; i++) {
+    std::unique_ptr<TensorBuffer> shape_buffer;
+    AicpuNodeTask::AllocTensorBuffer(1, shape_buffer);
+    out_shape_hbm.emplace_back(std::move(shape_buffer));
+  }
+  ASSERT_EQ(aicpu_node_task.CopyDataToHbm(*node_state->GetTaskContext(), out_shape_hbm), SUCCESS);
+  free(ext_mem);
 }
 
 TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task) {
@@ -231,7 +358,7 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task) {
   kernel_def.set_args_size(args.head.length);
   domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel();
   *kernel_def_tmp = kernel_def;
-
+  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def});
   AicpuNodeTask aicpu_node_task(node_item, task_def);
   ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
   ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
@@ -314,7 +441,7 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_blocking_node_task_fail) {
   kernel_def.set_args_size(args.head.length);
   domi::KernelDef *kernel_def_tmp = task_def.mutable_kernel();
   *kernel_def_tmp = kernel_def;
-
+  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def});
   AicpuNodeTask aicpu_node_task(node_item, task_def);
   RTS_STUB_RETURN_VALUE(rtGetDevice, rtError_t, 0x78000001);
diff --git a/tests/ut/ge/single_op/single_op_model_unittest.cc b/tests/ut/ge/single_op/single_op_model_unittest.cc
index 7b7a05d8..c3439aac 100644
--- a/tests/ut/ge/single_op/single_op_model_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_model_unittest.cc
@@ -23,6 +23,7 @@
 #include "graph/utils/graph_utils.h"
 #include "runtime/rt.h"
 #include "single_op/single_op_model.h"
+#include "aicpu/common/aicpu_task_struct.h"
 #include "single_op/task/tbe_task_builder.h"
 #include "single_op/task/rts_kernel_task_builder.h"
 #include "single_op/task/op_task.h"
@@ -43,6 +44,10 @@ constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape";
 const char *const kEngineNameAiCore = "AIcoreEngine";
 const char *const kEngineNameAiCpu = "aicpu_ascend_kernel";
 const char *const kEngineNameAiCpuTf = "aicpu_tf_kernel";
+struct AicpuTaskStruct {
+  aicpu::AicpuParamHead head;
+  uint64_t io_addrp[6];
+} __attribute__((packed));
 }  // namespace
 
 class UtestSingleOpModel : public testing::Test {
@@ -315,7 +320,7 @@ TEST_F(UtestSingleOpModel, BuildTaskList) {
   ASSERT_EQ(mem_task.LaunchKernel(0), SUCCESS);
 }
 
-TEST_F(UtestSingleOpModel, build_dynamic_task) {
+TEST_F(UtestSingleOpModel, build_dynamic_task01) {
   ComputeGraphPtr graph = make_shared<ComputeGraph>("single_op");
   GeModelPtr ge_model = make_shared<GeModel>();
   ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
@@ -366,3 +371,68 @@ TEST_F(UtestSingleOpModel, build_dynamic_task) {
   op_desc->SetOpKernelLibName(kEngineNameAiCpu);
   model.BuildTaskListForDynamicOp(res, single_op);
 }
+TEST_F(UtestSingleOpModel, build_dynamic_task02) {
+  ComputeGraphPtr graph = make_shared<ComputeGraph>("single_op");
+  GeModelPtr ge_model = make_shared<GeModel>();
+  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
+  shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
+  ge_model->SetModelTaskDef(model_task_def);
+
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 6;
+  domi::TaskDef *task_def = model_task_def->add_task();
+  task_def->set_type(RT_MODEL_TASK_KERNEL);
+  domi::KernelDef *kernel_def = task_def->mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  ge::hybrid::AicpuExtInfo aicpu_ext_info;
+  aicpu_ext_info.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
+  aicpu_ext_info.infoLen = sizeof(int32_t);
+  int32_t type = ge::DEPEND_COMPUTE;
+  memcpy_s(aicpu_ext_info.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
+  char *ext_mem = (char*)malloc(sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  memcpy_s(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info,
+           sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info_size(sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  domi::KernelContext *context = kernel_def->mutable_context();
+  context->set_kernel_type(6);    // ccKernelType::AI_CPU
+
+  string model_data_str = "dynamic_model";
+  SingleOpModel model("model", model_data_str.c_str(), model_data_str.size());
+  std::mutex stream_mu;
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  DynamicSingleOp single_op(0, &stream_mu, stream);
+  model.model_helper_.model_ = ge_model;
+  auto op_desc = std::make_shared<OpDesc>("add", "Add");
+  AttrUtils::SetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, ge::DEPEND_COMPUTE);
+  NodePtr node = graph->AddNode(op_desc);
+  model.op_list_[0] = node;
+  StreamResource *res = new (std::nothrow) StreamResource(1);
+
+  ASSERT_EQ(model.ParseTasks(), SUCCESS);
+  model.node_tasks_[node] = { *task_def, *task_def };
+  op_desc->SetOpKernelLibName(kEngineNameAiCpu);
+  model.BuildTaskListForDynamicOp(res, single_op);
+  model.node_tasks_[node] = { *task_def };
+  model.BuildTaskListForDynamicOp(res, single_op);
+}
+
+TEST_F(UtestSingleOpModel, build_memcpoy_task) {
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 6;
+  domi::KernelDef kernel_def;
+  kernel_def.set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def.set_args_size(args.head.length);
+  AiCpuCCTask aicpu_task;
+  ASSERT_EQ(aicpu_task.SetMemCopyTask(kernel_def), INTERNAL_ERROR);
+  kernel_def.set_args_size(0);
+  ASSERT_EQ(aicpu_task.SetMemCopyTask(kernel_def), FAILED);
+  const char *args2 = "123";
+  kernel_def.set_args(reinterpret_cast<const char *>(&args2), 3);
+  kernel_def.set_args_size(3);
+  ASSERT_EQ(aicpu_task.SetMemCopyTask(kernel_def), FAILED);
+}
\ No newline at end of file
diff --git a/tests/ut/ge/single_op/single_op_task_unittest.cc b/tests/ut/ge/single_op/single_op_task_unittest.cc
index 52091856..609bff65 100644
--- a/tests/ut/ge/single_op/single_op_task_unittest.cc
+++ b/tests/ut/ge/single_op/single_op_task_unittest.cc
@@ -16,7 +16,7 @@
 
 #include
 #include
-
+#include
 #include "graph/load/model_manager/model_utils.h"
 #include "graph/utils/graph_utils.h"
 #include "hybrid/node_executor/aicpu/aicpu_ext_info.h"
@@ -25,6 +25,7 @@
 #define protected public
 #define private public
 #include "single_op/single_op_model.h"
+#include "aicpu/common/aicpu_task_struct.h"
 #include "single_op/task/tbe_task_builder.h"
 #include "single_op/task/op_task.h"
 #include "single_op/task/tbe_task_builder.h"
@@ -38,6 +39,13 @@ using namespace testing;
 using namespace ge;
 using namespace optiling;
 
+namespace {
+struct AicpuTaskStruct {
+  aicpu::AicpuParamHead head;
+  uint64_t io_addrp[3];
+} __attribute__((packed));
+}  // namespace
+
 class UtestSingleOpTask : public testing::Test {
 protected:
   void SetUp() {
@@ -196,6 +204,45 @@ TEST_F(UtestSingleOpTask, test_atomic_exec) {
   task.CalcTilingInfo(run_info);
 }
 
+TEST_F(UtestSingleOpTask, test_aicpu_task_launch_kernel) {
+  AiCpuCCTask task;
+  rtStream_t stream;
+  ASSERT_EQ(rtStreamCreate(&stream, 0), RT_ERROR_NONE);
+  task.num_inputs_ = 2;
+  task.num_outputs_ = 1;
+  task.input_is_const_ = {true, false};
+  int total_addr = 3;
+  uint32_t* addrs[total_addr] = {nullptr, nullptr, nullptr};
+  task.io_addr_ = reinterpret_cast<uintptr_t *>(addrs);
+  task.io_addr_num_ = total_addr;
+  vector<DataBuffer> outputs(1, DataBuffer());
+  outputs[0].data = 0;
+  task.unknown_type_ = ge::DEPEND_COMPUTE;
+  ASSERT_EQ(task.InitForSummaryAndCopy(), SUCCESS);
+  auto &summary = task.output_summary_host_[0];
+  summary.shape_data_ptr = 0;
+  summary.shape_data_size = 1;
+  summary.raw_data_ptr = 0;
+  summary.raw_data_size = 1;
+  void *shape_buffer = nullptr;
+  rtMalloc(&shape_buffer, 1, RT_MEMORY_HBM);
+  task.out_shape_hbm_.emplace_back(shape_buffer);
+  task.memcpy_so_name_ = "libcpu_kernel.so";
+  task.memcpy_kernel_name_ = "RunCpuKernel";
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 3;
+  domi::TaskDef task_def;
+  domi::KernelDef *kernel_def = task_def.mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  auto &memcpy_args = kernel_def->args();
+  task.memcpy_args_size_ = kernel_def->args_size();
+  task.memcpy_args_.reset(new(std::nothrow) uint8_t[task.memcpy_args_size_]());
+  memcpy_s(task.memcpy_args_.get(), task.memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  ASSERT_EQ(task.CopyDataToHbm(outputs, stream), SUCCESS);
+}
+
 TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
   AiCpuCCTask task;
   task.num_inputs_ = 2;