
fix ut

pull/2046/head
guopeian committed 3 years ago · commit 99a51046dc
9 changed files with 242 additions and 93 deletions
  1. ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc  (+40, -36)
  2. ge/hybrid/node_executor/aicpu/aicpu_node_executor.h  (+2, -1)
  3. ge/single_op/single_op.cc  (+3, -1)
  4. ge/single_op/task/op_task.cc  (+72, -40)
  5. ge/single_op/task/op_task.h  (+3, -2)
  6. ge/single_op/task/tbe_task_builder.cc  (+1, -1)
  7. tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc  (+21, -11)
  8. tests/ut/ge/single_op/single_op_model_unittest.cc  (+54, -1)
  9. tests/ut/ge/single_op/single_op_task_unittest.cc  (+46, -0)

ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc  (+40, -36)

@@ -477,12 +477,11 @@ Status AicpuNodeTask::CopyDataToHbm(TaskContext &context,
   GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm));
 
   RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
-  uint32_t flag = RT_KERNEL_DEFAULT;
   auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(memcpy_so_name_.c_str()),
                                           reinterpret_cast<const void *>(memcpy_kernel_name_.c_str()),
                                           1,  // default core dim is 1
                                           memcpy_args_.get(), memcpy_args_size_,
-                                          nullptr, context.GetStream(), flag);
+                                          nullptr, context.GetStream(), RT_KERNEL_DEFAULT);
   GE_CHK_RT_RET(rt_ret);
 
   RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");
@@ -704,50 +703,55 @@ Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
   memcpy_args_size_ = kernel_def.args_size();
   memcpy_so_name_ = kernel_def.so_name();
   memcpy_kernel_name_ = kernel_def.kernel_name();
-  GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
-                  REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
-                                     memcpy_args.size(), memcpy_args_size_);
-                  GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
-                         memcpy_args.size(), memcpy_args_size_);
-                  return FAILED;);
-  GE_IF_BOOL_EXEC(memcpy_args_size_ < sizeof(aicpu::AicpuParamHead),
-                  REPORT_INNER_ERROR("E19999",
-                                     "Task def args_size=%u is less than aicpu param head len=%zu.",
-                                     memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
-                  GELOGE(FAILED,
-                         "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
-                         memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
-                  return FAILED;);
+  if (memcpy_args.size() != memcpy_args_size_) {
+    REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                       memcpy_args.size(), memcpy_args_size_);
+    GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+           memcpy_args.size(), memcpy_args_size_);
+    return FAILED;
+  }
+
+  if (memcpy_args_size_ < sizeof(aicpu::AicpuParamHead)) {
+    REPORT_INNER_ERROR("E19999", "Task def args_size=%u is less than aicpu param head len=%zu.",
+                       memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    GELOGE(FAILED, "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+           memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    return FAILED;
+  }
 
   memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
-  GE_IF_BOOL_EXEC(memcpy_args_ == nullptr,
-                  REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
-                                     memcpy_args_size_);
-                  GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
-                         memcpy_args_size_);
-                  return FAILED;);
+  if (memcpy_args_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                       memcpy_args_size_);
+    GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+           memcpy_args_size_);
+    return FAILED;
+  }
 
   errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
-  GE_IF_BOOL_EXEC(sec_ret != EOK,
-                  REPORT_INNER_ERROR("E19999",
-                                     "memcpy_s argc_ failed for Node[MemCopy], ret: %d", sec_ret);
-                  GELOGE(INTERNAL_ERROR,
-                         "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
-                  return sec_ret;);
+  if (sec_ret != EOK) {
+    REPORT_INNER_ERROR("E19999",
+                       "memcpy_s argc_ failed for Node[MemCopy], ret: %d", sec_ret);
+    GELOGE(INTERNAL_ERROR,
+           "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+    return sec_ret;
+  }
+
   auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
   uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
   auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
   // if has input and output, need copy to ioaddr
   int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
                          &copy_io_addr_[0], sizeof(uint64_t) * memcpy_io_num);
-  GE_IF_BOOL_EXEC(cpy_ret != 0,
-                  REPORT_INNER_ERROR("E19999", "Node[Memcpoy] memcpy io addr to AicpuParamHead failed,"
-                                     "ret=%d, args_size=%u, io nums=%u.",
-                                     cpy_ret, memcpy_args_size_, memcpy_io_num);
-                  GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
-                         "ret=%d, args_size=%u, io nums=%u.",
-                         cpy_ret, memcpy_args_size_, memcpy_io_num);
-                  return INTERNAL_ERROR;);
+  if (cpy_ret != 0) {
+    REPORT_INNER_ERROR("E19999", "Node[Memcpoy] memcpy io addr to AicpuParamHead failed,"
+                       "ret=%d, args_size=%u, io nums=%u.",
+                       cpy_ret, memcpy_args_size_, memcpy_io_num);
+    GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+           "ret=%d, args_size=%u, io nums=%u.",
+           cpy_ret, memcpy_args_size_, memcpy_io_num);
+    return INTERNAL_ERROR;
+  }
   GELOGD("Set memcpy task for node[MemCopy] successfully.");
   return SUCCESS;
 }
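
Note on the pattern above: the commit mechanically unfolds GE_IF_BOOL_EXEC into plain if blocks, with no behavior change. A minimal sketch of the before/after, assuming the macro is defined roughly as in GE's common debug headers (the exact expansion may vary by version):

// Approximate shape of the macro being removed (assumption):
#define GE_IF_BOOL_EXEC(expr, exec_expr) \
  {                                      \
    if (expr) {                          \
      exec_expr;                         \
    }                                    \
  }

// Before: the check, error report, and early return are packed into macro
// arguments, so debuggers and line-coverage tools see a single line.
GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
                REPORT_INNER_ERROR("E19999", "...");
                return FAILED;);

// After: the same logic as an ordinary if block; every statement has its
// own line for breakpoints and UT coverage counters.
if (memcpy_args.size() != memcpy_args_size_) {
  REPORT_INNER_ERROR("E19999", "...");
  return FAILED;
}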


ge/hybrid/node_executor/aicpu/aicpu_node_executor.h  (+2, -1)

@@ -108,7 +108,6 @@ class AicpuNodeTaskBase : public NodeTask {
   std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
   std::unique_ptr<TensorBuffer> copy_input_src_dev_;
   std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
-  bool need_sync_ = false;
 };
 
 class AicpuTfNodeTask : public AicpuNodeTaskBase {
@@ -151,6 +150,8 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {
   std::unique_ptr<TensorBuffer> copy_ioaddr_dev_;
 
   std::unique_ptr<TensorBuffer> copy_workspace_buf_;
+
+  bool need_sync_ = false;
 };
 
 class AicpuNodeTask : public AicpuNodeTaskBase {


ge/single_op/single_op.cc  (+3, -1)

@@ -433,11 +433,13 @@ Status DynamicSingleOp::ExecuteAsync(const vector<GeTensorDesc> &input_desc,
   if (!inputs_size.empty()) {
     StreamResource *stream_resource = SingleOpManager::GetInstance().GetResource(resource_id_, stream_);
     GE_CHK_STATUS_RET_NOLOG(UpdateInputsBufferAddr(stream_resource, stream_, inputs_size, update_buffers));
-    GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers));
   }
 
   if (hybrid_model_executor_ != nullptr) {
     GELOGD("Execute multi-task dynamic single op by hybrid model executor");
+    if (!inputs_size.empty()) {
+      GE_CHK_STATUS_RET_NOLOG(SetHostTensorValue(input_desc, input_buffers));
+    }
     hybrid::HybridModelExecutor::ExecuteArgs args;
     GE_CHK_STATUS_RET_NOLOG(InitHybridModelArgs(update_buffers, output_buffers, input_desc, args));



ge/single_op/task/op_task.cc  (+72, -40)

@@ -293,6 +293,9 @@ Status TbeOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc, cons
 }
 
 Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) {
+  node_ = node;
+  tiling_buffer_ = tiling_buffer;
+  max_tiling_size_ = max_tiling_size;
   if (tiling_buffer != nullptr) {
     uintptr_t *arg_base = nullptr;
     size_t arg_num = 0;
@@ -310,9 +313,6 @@ Status TbeOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer,
     }
     arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer);
   }
-  node_ = node;
-  tiling_buffer_ = tiling_buffer;
-  max_tiling_size_ = max_tiling_size;
   return SUCCESS;
 }

@@ -481,6 +481,25 @@ void TbeOpTask::GetIoAddr(uintptr_t *&arg_base, size_t &arg_count) {
   }
 }
 
+Status AtomicAddrCleanOpTask::EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) {
+  node_ = node;
+  tiling_buffer_ = tiling_buffer;
+  max_tiling_size_ = max_tiling_size;
+  if (tiling_buffer != nullptr) {
+    uintptr_t *arg_base = nullptr;
+    size_t arg_num = 0;
+    GetIoAddr(arg_base, arg_num);
+    uint32_t tiling_index = atomic_output_indices_.size();
+    if (arg_num == 0 || arg_num < tiling_index) {
+      GELOGE(ACL_ERROR_GE_INTERNAL_ERROR, "[Check][Size]Tiling index %u, arg number %zu is invalid.",
+             tiling_index, arg_num);
+      return ACL_ERROR_GE_INTERNAL_ERROR;
+    }
+    arg_base[tiling_index] = reinterpret_cast<uintptr_t>(tiling_buffer);
+  }
+  return SUCCESS;
+}
+
 Status AtomicAddrCleanOpTask::UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
                                                 const vector<GeTensorDesc> &output_desc) {
   return SUCCESS;
@@ -585,7 +604,7 @@ Status AiCpuBaseTask::SetExtInfoAndType(const std::string &kernel_ext_info, uint
 
   int32_t unknown_shape_type_val = 0;
   (void) AttrUtils::GetInt(op_desc_, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, unknown_shape_type_val);
-  GELOGD("Get unknown_type is %d.", unknown_shape_type_val);
+  GELOGI("Get unknown_type is %d.", unknown_shape_type_val);
   unknown_type_ = static_cast<UnknowShapeOpType>(unknown_shape_type_val);
 
   aicpu_ext_handle_.reset(new(std::nothrow) ::ge::hybrid::AicpuExtInfoHandler(op_desc_->GetName(),
@@ -892,7 +911,7 @@ Status AiCpuCCTask::CopyDataToHbm(vector<DataBuffer> &outputs,
   auto ret = rtCpuKernelLaunchWithFlag(static_cast<const void *>(memcpy_so_name_.data()),
                                        static_cast<const void *>(memcpy_kernel_name_.data()),
                                        block_dim_, memcpy_args_.get(), static_cast<uint32_t>(memcpy_args_size_),
-                                       nullptr, stream, dump_flag_);
+                                       nullptr, stream, RT_KERNEL_DEFAULT);
   GE_CHK_RT_RET(ret);
   GE_CHK_RT_RET(rtStreamSynchronize(stream));
   return SUCCESS;
@@ -1112,7 +1131,15 @@ Status AiCpuCCTask::LaunchKernel(const std::vector<GeTensorDesc> &input_desc,
                                  std::vector<DataBuffer> &output_buffers,
                                  rtStream_t stream) {
   GE_CHK_STATUS_RET_NOLOG(UpdateExtInfo(input_desc, output_desc, stream));
-  GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    std::vector<DataBuffer> summary_buffers;
+    for (size_t i = 0; i < num_outputs_; ++i) {
+      summary_buffers.emplace_back(output_summary_[i], sizeof(aicpu::FWKAdapter::ResultSummary), false);
+    }
+    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, summary_buffers));
+  } else {
+    GE_CHK_STATUS_RET_NOLOG(UpdateIoAddr(input_buffers, output_buffers));
+  }
   GE_CHK_STATUS_RET_NOLOG(LaunchKernel(stream));
   if (unknown_type_ == DEPEND_SHAPE_RANGE) {
     GE_CHK_RT_RET(rtStreamSynchronize(stream));
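
The DEPEND_COMPUTE branch above covers ops whose output shapes are only known after the kernel has run: instead of binding the caller's output DataBuffers, the launch binds each output slot to a per-output ResultSummary buffer, and the real outputs are copied out afterwards by the memcopy task (see CopyDataToHbm and SetMemCopyTask). For reference, the summary record as declared in the AICPU FWKAdapter headers (field names taken from the aicpu adapter sources; verify against your header version):

namespace aicpu {
namespace FWKAdapter {
// Filled in by the AICPU kernel for each DEPEND_COMPUTE output.
struct ResultSummary {
  uint64_t shape_data_ptr;   // device address of the output shape data
  uint64_t shape_data_size;  // shape data length in bytes
  uint64_t raw_data_ptr;     // device address of the output payload
  uint64_t raw_data_size;    // payload length in bytes
};
}  // namespace FWKAdapter
}  // namespace aicpu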
@@ -1157,50 +1184,55 @@ Status AiCpuCCTask::SetMemCopyTask(const domi::KernelDef &kernel_def) {
   memcpy_args_size_ = kernel_def.args_size();
   memcpy_so_name_ = kernel_def.so_name();
   memcpy_kernel_name_ = kernel_def.kernel_name();
-  GE_IF_BOOL_EXEC(memcpy_args.size() != memcpy_args_size_,
-                  REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
-                                     memcpy_args.size(), memcpy_args_size_);
-                  GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
-                         memcpy_args.size(), memcpy_args_size_);
-                  return FAILED;);
-  GE_IF_BOOL_EXEC(memcpy_args_size_ < sizeof(aicpu::AicpuParamHead),
-                  REPORT_INNER_ERROR("E19999",
-                                     "Task def args_size=%u is less than aicpu param head len=%zu.",
-                                     memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
-                  GELOGE(FAILED,
-                         "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
-                         memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
-                  return FAILED;);
+  if (memcpy_args.size() != memcpy_args_size_) {
+    REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu, but args_size=%u not equal.",
+                       memcpy_args.size(), memcpy_args_size_);
+    GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu, but args_size=%u not equal.",
+           memcpy_args.size(), memcpy_args_size_);
+    return FAILED;
+  }
+  if (memcpy_args_size_ < sizeof(aicpu::AicpuParamHead)) {
+    REPORT_INNER_ERROR("E19999",
+                       "Task def args_size=%u is less than aicpu param head len=%zu.",
+                       memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    GELOGE(FAILED,
+           "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+           memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    return FAILED;
+  }
 
   memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
-  GE_IF_BOOL_EXEC(memcpy_args_ == nullptr,
-                  REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
-                                     memcpy_args_size_);
-                  GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
-                         memcpy_args_size_);
-                  return FAILED;);
+  if (memcpy_args_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                       memcpy_args_size_);
+    GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+           memcpy_args_size_);
+    return FAILED;
+  }
 
   errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
-  GE_IF_BOOL_EXEC(sec_ret != EOK,
-                  REPORT_INNER_ERROR("E19999",
-                                     "memcpy_s argc_ failed for Node[MemCopy], ret: %d", sec_ret);
-                  GELOGE(INTERNAL_ERROR,
-                         "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
-                  return sec_ret;);
+  if (sec_ret != EOK) {
+    REPORT_INNER_ERROR("E19999",
+                       "memcpy_s argc_ failed for Node[MemCopy], ret: %d", sec_ret);
+    GELOGE(INTERNAL_ERROR,
+           "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+    return sec_ret;
+  }
   auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
   uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
   auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
   // if has input and output, need copy to ioaddr
   int cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
                          &copy_io_addr_[0], sizeof(uint64_t) * memcpy_io_num);
-  GE_IF_BOOL_EXEC(cpy_ret != 0,
-                  REPORT_INNER_ERROR("E19999", "Node[Memcpoy] memcpy io addr to AicpuParamHead failed,"
-                                     "ret=%d, args_size=%u, io nums=%u.",
-                                     cpy_ret, memcpy_args_size_, memcpy_io_num);
-                  GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
-                         "ret=%d, args_size=%u, io nums=%u.",
-                         cpy_ret, memcpy_args_size_, memcpy_io_num);
-                  return INTERNAL_ERROR;);
+  if (cpy_ret != 0) {
+    REPORT_INNER_ERROR("E19999", "Node[Memcpoy] memcpy io addr to AicpuParamHead failed,"
+                       "ret=%d, args_size=%u, io nums=%u.",
+                       cpy_ret, memcpy_args_size_, memcpy_io_num);
+    GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed,"
+           "ret=%d, args_size=%u, io nums=%u.",
+           cpy_ret, memcpy_args_size_, memcpy_io_num);
+    return INTERNAL_ERROR;
+  }
   GELOGD("Set memcpy task for node[MemCopy] successfully.");
   return SUCCESS;
 }


ge/single_op/task/op_task.h  (+3, -2)

@@ -97,7 +97,7 @@ class TbeOpTask : public OpTask {
   const void *GetArgs() const;
   size_t GetArgSize() const;
   const std::string &GetStubName() const;
-  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
+  virtual Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size);
   const std::string &GetTaskType() const override;
   void SetHandle(void *handle);
 
@@ -149,6 +149,7 @@ class TbeOpTask : public OpTask {
 class AtomicAddrCleanOpTask : public TbeOpTask {
  public:
   Status InitAtomicAddrCleanIndices();
+  Status EnableDynamicSupport(const NodePtr &node, void *tiling_buffer, uint32_t max_tiling_size) override;
 
  private:
   Status UpdateNodeByShape(const vector<GeTensorDesc> &input_desc,
@@ -156,8 +157,8 @@ class AtomicAddrCleanOpTask : public TbeOpTask {
   Status UpdateIoAddr(const vector<DataBuffer> &inputs, const vector<DataBuffer> &outputs) override;
   Status UpdateTilingArgs(rtStream_t stream) override;
   Status CalcTilingInfo(optiling::utils::OpRunInfo &run_info) override;
-  std::vector<int> atomic_output_indices_;
 
+  std::vector<int> atomic_output_indices_;
 };
 
 class AiCpuBaseTask : public OpTask {
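
Why EnableDynamicSupport gains the virtual/override pair: TbeTaskBuilder::InitTilingInfo works through a TbeOpTask reference, so without the virtual keyword the call bound statically to the base-class implementation even when the task was an AtomicAddrCleanOpTask, whose tiling slot is atomic_output_indices_.size() rather than whatever index the base class derives (that computation sits outside the hunks shown here). A condensed sketch of the dispatch this enables, with a hypothetical caller:

// Resolves to AtomicAddrCleanOpTask::EnableDynamicSupport at runtime now
// that the base declaration is virtual (sketch; node, tiling_buffer, and
// max_tiling_size are placeholders, not values from this commit):
std::unique_ptr<TbeOpTask> task(new AtomicAddrCleanOpTask());
Status ret = task->EnableDynamicSupport(node, tiling_buffer, max_tiling_size);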


ge/single_op/task/tbe_task_builder.cc  (+1, -1)

@@ -425,7 +425,7 @@ Status TbeTaskBuilder::InitTilingInfo(TbeOpTask &task) {
     GELOGD("[%s] Done allocating tiling buffer, size=%ld.", op_desc_->GetName().c_str(), max_size);
   }
 
-  task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size));
+  GE_CHK_STATUS_RET_NOLOG(task.EnableDynamicSupport(node_, tiling_buffer, static_cast<uint32_t>(max_size)));
   return SUCCESS;
 }
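
With GE_CHK_STATUS_RET_NOLOG wrapped around the call, a failure from EnableDynamicSupport (for example the invalid-tiling-index path added in op_task.cc) now propagates out of InitTilingInfo instead of being silently dropped. The macro expands roughly as below; the real definition lives in GE's common debug/log header, so treat this as an assumption:

// Approximate expansion (assumption; the real macro may differ in detail):
#define GE_CHK_STATUS_RET_NOLOG(expr)  \
  do {                                 \
    const ge::Status _status = (expr); \
    if (_status != ge::SUCCESS) {      \
      return _status;                  \
    }                                  \
  } while (false)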



tests/ut/ge/hybrid/node_executor/aicpu/aicpu_node_executor_unittest.cc  (+21, -11)

@@ -148,20 +148,30 @@ TEST_F(UtestAicpuNodeExecutor, aicpu_tf_node_task) {
   domi::TaskDef task_def2;
   task_def2.set_type(RT_MODEL_TASK_ALL_KERNEL);
-  task_def2.mutable_kernel()->set_args(reinterpret_cast<const char *>(&args), args.head.length);
-  task_def2.mutable_kernel()->set_args_size(args.head.length);
-  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def2});
-  AicpuNodeTask aicpu_node_task(node_item, task_def);
-  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), FAILED);
+  domi::KernelDef *kernel_def = task_def2.mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  AicpuExtInfo aicpu_ext_info2;
+  aicpu_ext_info2.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
+  aicpu_ext_info2.infoLen = sizeof(int32_t);
+  memcpy_s(aicpu_ext_info2.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
+  char *ext_mem2 = (char*)malloc(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  memcpy_s(ext_mem2, sizeof(AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info2, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info(ext_mem2, sizeof(AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info_size(sizeof(AicpuExtInfo) + sizeof(int32_t));
+  hybrid_model.task_defs_[node] = std::vector<domi::TaskDef>({task_def2, task_def2});
+  AicpuNodeTask aicpu_node_task(node_item, task_def2);
+  ASSERT_EQ(aicpu_node_task.Init(hybrid_model), SUCCESS);
+  ASSERT_EQ(aicpu_node_task.UpdateIoAddr(*node_state->GetTaskContext()), SUCCESS);
+  ASSERT_EQ(aicpu_node_task.LaunchTask(*node_state->GetTaskContext()), SUCCESS);
+  node_item->is_dynamic = false;
+  ASSERT_EQ(aicpu_node_task.UpdateIoAddr(*node_state->GetTaskContext()), SUCCESS);
   //kernel_ex_def->set_allocated_kernel_ext_info(nullptr);
   free(ext_mem);
+  free(ext_mem2);
 }
 }  // namespace ge
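
What the test is building: kernel_ext_info is a packed type-length-value blob that AicpuNodeTask::Init parses to recover, among other things, the unknown-shape type. A sketch of the single entry constructed above (layout inferred from the AicpuExtInfo usage in this test, not quoted from a header):

//   | int32_t infoType | uint32_t infoLen | char infoMsg[infoLen] |
//
// infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE and the 4-byte
// payload carries the UnknowShapeOpType value; supplying this blob is why
// Init now returns SUCCESS where the earlier blob-less TaskDef made it
// return FAILED.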

tests/ut/ge/single_op/single_op_model_unittest.cc  (+54, -1)

@@ -25,6 +25,7 @@
 #include "single_op/single_op_model.h"
 #include "single_op/task/tbe_task_builder.h"
 #include "single_op/task/rts_kernel_task_builder.h"
+#include "aicpu/common/aicpu_task_struct.h"
 #include "single_op/task/op_task.h"
 #include "framework/common/helper/model_helper.h"
 #include "single_op/single_op.h"
@@ -43,6 +44,11 @@ constexpr char const *kAttrSupportDynamicShape = "support_dynamicshape";
 const char *const kEngineNameAiCore = "AIcoreEngine";
 const char *const kEngineNameAiCpu = "aicpu_ascend_kernel";
 const char *const kEngineNameAiCpuTf = "aicpu_tf_kernel";
+
+struct AicpuTaskStruct {
+  aicpu::AicpuParamHead head;
+  uint64_t io_addrp[6];
+}__attribute__((packed));
 }  // namespace
 
 class UtestSingleOpModel : public testing::Test {
@@ -315,7 +321,7 @@ TEST_F(UtestSingleOpModel, BuildTaskList) {
   ASSERT_EQ(mem_task.LaunchKernel(0), SUCCESS);
 }
 
-TEST_F(UtestSingleOpModel, build_dynamic_task) {
+TEST_F(UtestSingleOpModel, build_dynamic_task01) {
   ComputeGraphPtr graph = make_shared<ComputeGraph>("single_op");
   GeModelPtr ge_model = make_shared<GeModel>();
   ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
@@ -366,3 +372,50 @@ TEST_F(UtestSingleOpModel, build_dynamic_task01) {
   op_desc->SetOpKernelLibName(kEngineNameAiCpu);
   model.BuildTaskListForDynamicOp(res, single_op);
 }
+
+TEST_F(UtestSingleOpModel, build_dynamic_task02) {
+  ComputeGraphPtr graph = make_shared<ComputeGraph>("single_op");
+  GeModelPtr ge_model = make_shared<GeModel>();
+  ge_model->SetGraph(GraphUtils::CreateGraphFromComputeGraph(graph));
+  shared_ptr<domi::ModelTaskDef> model_task_def = make_shared<domi::ModelTaskDef>();
+  ge_model->SetModelTaskDef(model_task_def);
+  AicpuTaskStruct args;
+  args.head.length = sizeof(args);
+  args.head.ioAddrNum = 6;
+  domi::TaskDef *task_def = model_task_def->add_task();
+  task_def->set_type(RT_MODEL_TASK_KERNEL);
+  domi::KernelDef *kernel_def = task_def->mutable_kernel();
+  kernel_def->set_args(reinterpret_cast<const char *>(&args), args.head.length);
+  kernel_def->set_args_size(args.head.length);
+  ge::hybrid::AicpuExtInfo aicpu_ext_info;
+  aicpu_ext_info.infoType = aicpu::FWKAdapter::FWK_ADPT_EXT_SHAPE_TYPE;
+  aicpu_ext_info.infoLen = sizeof(int32_t);
+  int32_t type = ge::DEPEND_COMPUTE;
+  memcpy_s(aicpu_ext_info.infoMsg, sizeof(int32_t), &type, sizeof(int32_t));
+  char *ext_mem = (char*)malloc(sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  memcpy_s(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t), &aicpu_ext_info,
+           sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info(ext_mem, sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  kernel_def->set_kernel_ext_info_size(sizeof(ge::hybrid::AicpuExtInfo) + sizeof(int32_t));
+  domi::KernelContext *context = kernel_def->mutable_context();
+  context->set_kernel_type(6);  // ccKernelType::AI_CPU
+
+  string model_data_str = "dynamic_model";
+  SingleOpModel model("model", model_data_str.c_str(), model_data_str.size());
+  std::mutex stream_mu;
+  rtStream_t stream = nullptr;
+  rtStreamCreate(&stream, 0);
+  DynamicSingleOp single_op(0, &stream_mu, stream);
+  model.model_helper_.model_ = ge_model;
+  auto op_desc = std::make_shared<ge::OpDesc>("add", "Add");
+  AttrUtils::SetInt(op_desc, ::ge::ATTR_NAME_UNKNOWN_SHAPE_TYPE, ge::DEPEND_COMPUTE);
+  NodePtr node = graph->AddNode(op_desc);
+  model.op_list_[0] = node;
+  StreamResource *res = new (std::nothrow) StreamResource(1);
+
+  ASSERT_EQ(model.ParseTasks(), SUCCESS);
+  model.node_tasks_[node] = { *task_def, *task_def };
+  op_desc->SetOpKernelLibName(kEngineNameAiCpu);
+  model.BuildTaskListForDynamicOp(res, single_op);
+}
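
Layout note for the packed AicpuTaskStruct shared by these tests: the blob handed to set_args is an aicpu::AicpuParamHead immediately followed by ioAddrNum uint64_t device addresses, which is what the SetMemCopyTask paths above validate (head-size check, then memcpy of the io addresses). A sketch of the blob, with sizes per the packed struct (assumption, not quoted from a header):

//   | aicpu::AicpuParamHead | io_addrp[0] | ... | io_addrp[5] |
//   |<----------- head.length == sizeof(args) --------------->|
//
// head.ioAddrNum = 6 announces how many uint64_t io addresses follow the
// header; a mismatch would trip the "args.size != args_size" checks above.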

tests/ut/ge/single_op/single_op_task_unittest.cc  (+46, -0)

@@ -237,3 +237,49 @@ TEST_F(UtestSingleOpTask, test_aicpu_task_update_io_addr) {
     ASSERT_EQ(ret, PARAM_INVALID);
   }
 }
+
+TEST_F(UtestSingleOpTask, test_aicpu_task_launch_kernel) {
+  AiCpuCCTask task;
+  rtStream_t stream = nullptr;
+  task.num_inputs_ = 2;
+  task.num_outputs_ = 1;
+  task.input_is_const_ = {true, false};
+  int total_addr = 3;
+  uint32_t* addrs[total_addr] = {nullptr, nullptr, nullptr};
+  task.io_addr_ = reinterpret_cast<uintptr_t*>(addrs);
+  task.io_addr_num_ = total_addr;
+
+  {
+    vector<DataBuffer> inputs(2, DataBuffer());
+    vector<DataBuffer> outputs(1, DataBuffer());
+    vector<GeTensorDesc> inputs_desc(2, GeTensorDesc(GeShape(), FORMAT_NCHW, DT_FLOAT));
+    vector<GeTensorDesc> outputs_desc(1, GeTensorDesc(GeShape(), FORMAT_NCHW, DT_FLOAT));
+    task.unknown_type_ = ge::DEPEND_COMPUTE;
+    task.num_outputs_ = 1;
+    ASSERT_EQ(task.InitForSummaryAndCopy(), SUCCESS);
+    ASSERT_EQ(task.LaunchKernel(inputs_desc, inputs, outputs_desc, outputs, stream), SUCCESS);
+  }
+}
+
+TEST_F(UtestSingleOpTask, test_dynamic_support) {
+  auto graph = make_shared<ComputeGraph>("graph");
+  auto op_desc = make_shared<OpDesc>("Add", "Add");
+  auto node = graph->AddNode(op_desc);
+  AtomicAddrCleanOpTask atomic_task;
+  TbeOpTask tbe_task;
+
+  tbe_task.arg_size_ = sizeof(void *) * 1;
+  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
+  atomic_task.arg_size_ = sizeof(void *) * 1;
+  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
+  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
+  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), ACL_ERROR_GE_INTERNAL_ERROR);
+
+  tbe_task.arg_size_ = sizeof(void *) * 2;
+  tbe_task.args_.reset(new (std::nothrow) uint8_t[tbe_task.arg_size_]);
+  atomic_task.arg_size_ = sizeof(void *) * 2;
+  atomic_task.args_.reset(new (std::nothrow) uint8_t[atomic_task.arg_size_]);
+  ASSERT_EQ(tbe_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
+  ASSERT_EQ(atomic_task.EnableDynamicSupport(node, (void *)0x0001, 1), SUCCESS);
+  tbe_task.tiling_buffer_ = nullptr;
+  atomic_task.tiling_buffer_ = nullptr;
+}
