
fix

pull/2052/head
guopeian, 3 years ago
commit 359eb10817
2 changed files with 216 additions and 72 deletions:
  1. ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc (+167, -38)
  2. ge/hybrid/node_executor/aicpu/aicpu_node_executor.h (+49, -34)

ge/hybrid/node_executor/aicpu/aicpu_node_executor.cc (+167, -38)

@@ -548,8 +548,8 @@ Status AicpuTfNodeTask::EnsureSessionCreated(uint64_t session_id) {
return SUCCESS;
}

-Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                                          std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                                            std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
for (auto i = 0; i < node_item_->num_outputs; ++i) {
auto &result_summary = output_summary_host_[i];
GE_CHK_RT_RET(rtMemcpy(&result_summary, sizeof(aicpu::FWKAdapter::ResultSummary),
@@ -574,6 +574,30 @@ Status AicpuTfNodeTask::ReadResultSummaryAndPrepareMemory(TaskContext &context,
return SUCCESS;
}

+Status AicpuNodeTask::CopyDataToHbm(TaskContext &context,
+                                    const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+  GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                         INTERNAL_ERROR,
+                         "[Check][Size]Node[%s] has %d outputs but out shape is %zu not equal.",
+                         node_name_.c_str(), node_item_->num_outputs, out_shape_hbm.size());
+
+  GE_CHK_STATUS_RET_NOLOG(PrepareCopyInputs(context, out_shape_hbm));
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] Start");
+  auto rt_ret = rtCpuKernelLaunchWithFlag(reinterpret_cast<const void *>(memcpy_so_name_.c_str()),
+                                          reinterpret_cast<const void *>(memcpy_kernel_name_.c_str()),
+                                          1,  // default core dim is 1
+                                          memcpy_args_.get(), memcpy_args_size_,
+                                          nullptr, context.GetStream(), RT_KERNEL_DEFAULT);
+  GE_CHK_RT_RET(rt_ret);
+
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[LaunchCopy] End");
+
+  GE_CHK_RT_RET(rtStreamSynchronize(context.GetStream()));
+  RECORD_CALLBACK_EVENT(context.GetExecutionContext(), node_name_.c_str(), "[SynchronizeCopy] End");
+  return SUCCESS;
+}

Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
@@ -593,8 +617,8 @@ Status AicpuTfNodeTask::CopyDataToHbm(TaskContext &context,
return SUCCESS;
}

-Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
-                                          const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::PrepareCopyInputs(const TaskContext &context,
+                                            const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
std::vector<uint64_t> copy_input_release_flag;
std::vector<uint64_t> copy_input_data_size;
std::vector<uint64_t> copy_input_src;
@@ -635,8 +659,8 @@ Status AicpuTfNodeTask::PrepareCopyInputs(const TaskContext &context,
return SUCCESS;
}

-Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
-                                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
+Status AicpuNodeTaskBase::UpdateShapeByHbmBuffer(TaskContext &context,
+                                                 const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
GE_CHK_BOOL_RET_STATUS(out_shape_hbm.size() == static_cast<std::size_t>(node_item_->num_outputs),
INTERNAL_ERROR,
"Node[%s] has %d outputs but out shape is %zu",
@@ -667,7 +691,7 @@ Status AicpuTfNodeTask::UpdateShapeByHbmBuffer(TaskContext &context,
return SUCCESS;
}

-Status AicpuTfNodeTask::UpdateShapeAndDataByResultSummary(TaskContext &context) {
+Status AicpuNodeTaskBase::UpdateShapeAndDataByResultSummary(TaskContext &context) {
GELOGD("Node[%s] update shape and data by result summary begin.", node_name_.c_str());

std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
@@ -762,7 +786,7 @@ Status AicpuTfNodeTask::LaunchTask(TaskContext &context) {
return SUCCESS;
}

-Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
+Status AicpuNodeTaskBase::TaskCallback(TaskContext &context) {
GELOGD("Node[%s] task callback start. is_dynamic=%s, unknown_type=%d.",
node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
Status callback_ret = SUCCESS;
@@ -779,14 +803,115 @@ Status AicpuTfNodeTask::TaskCallback(TaskContext &context) {
return callback_ret;
}

+Status AicpuNodeTask::SetMemCopyTask(const domi::TaskDef &task_def) {
+  if (node_item_->num_outputs == 0) {
+    GELOGD("Node[%s] type[%s] has no output, no need to set mem_copy task.",
+           node_name_.c_str(), node_item_->node_type.c_str());
+    return SUCCESS;
+  }
+
+  GELOGD("Start to set memcpy task for node[%s].", node_name_.c_str());
+  const domi::KernelDef &kernel_def = task_def.kernel();
+  auto &memcpy_args = kernel_def.args();
+  memcpy_args_size_ = kernel_def.args_size();
+  memcpy_so_name_ = kernel_def.so_name();
+  memcpy_kernel_name_ = kernel_def.kernel_name();
+  if (memcpy_args.size() != memcpy_args_size_) {
+    REPORT_INNER_ERROR("E19999", "MemCopy task def args.size=%zu and args_size=%u are not equal.",
+                       memcpy_args.size(), memcpy_args_size_);
+    GELOGE(FAILED, "[Check][Size]MemCopy task def args.size=%zu and args_size=%u are not equal.",
+           memcpy_args.size(), memcpy_args_size_);
+    return FAILED;
+  }
+
+  if (memcpy_args_size_ < sizeof(aicpu::AicpuParamHead)) {
+    REPORT_INNER_ERROR("E19999", "Task def args_size=%u is less than aicpu param head len=%zu.",
+                       memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    GELOGE(FAILED, "[Check][Size] Task def args_size=%u is less than aicpu param head len=%zu.",
+           memcpy_args_size_, sizeof(aicpu::AicpuParamHead));
+    return FAILED;
+  }
+
+  memcpy_args_.reset(new(std::nothrow) uint8_t[memcpy_args_size_]());
+  if (memcpy_args_ == nullptr) {
+    REPORT_INNER_ERROR("E19999", "new memory failed for Node[MemCopy], task_size[%u].",
+                       memcpy_args_size_);
+    GELOGE(FAILED, "[Malloc][Memory] failed for Node[MemCopy], task_size[%u].",
+           memcpy_args_size_);
+    return FAILED;
+  }
+
+  errno_t sec_ret = memcpy_s(memcpy_args_.get(), memcpy_args_size_, memcpy_args.c_str(), memcpy_args.size());
+  if (sec_ret != EOK) {
+    REPORT_INNER_ERROR("E19999",
+                       "memcpy_s args failed for Node[MemCopy], ret: %d", sec_ret);
+    GELOGE(INTERNAL_ERROR,
+           "[Update][args] failed for Node[MemCopy], ret: %d", sec_ret);
+    return INTERNAL_ERROR;
+  }
+
+  auto memcpy_param_head = reinterpret_cast<aicpu::AicpuParamHead *>(memcpy_args_.get());
+  uint32_t memcpy_io_num = memcpy_param_head->ioAddrNum;
+  auto memcpy_io_addr = memcpy_args_.get() + sizeof(aicpu::AicpuParamHead);
+  // if has input and output, need copy to ioaddr
+  errno_t cpy_ret = memcpy_s(memcpy_io_addr, memcpy_args_size_ - sizeof(aicpu::AicpuParamHead),
+                             &copy_io_addr_[0], sizeof(uint64_t) * memcpy_io_num);
+  if (cpy_ret != EOK) {
+    REPORT_INNER_ERROR("E19999", "Node[MemCopy] memcpy io addr to AicpuParamHead failed, "
+                       "ret=%d, args_size=%u, io nums=%u.",
+                       cpy_ret, memcpy_args_size_, memcpy_io_num);
+    GELOGE(INTERNAL_ERROR, "[Update][io_addr]Node[MemCopy] memcpy io addr to AicpuParamHead failed, "
+           "ret=%d, args_size=%u, io nums=%u.",
+           cpy_ret, memcpy_args_size_, memcpy_io_num);
+    return INTERNAL_ERROR;
+  }
+  GELOGD("Set memcpy task for node[MemCopy] successfully.");
+  return SUCCESS;
+}

+Status AicpuNodeTask::InitForDependComputeTask() {
+  if ((unknown_type_ != DEPEND_COMPUTE) || (node_item_->num_outputs == 0)) {
+    GELOGD("Node[%s] type[%s] unknown_type is %d, output num is %d.",
+           node_name_.c_str(), node_item_->node_type.c_str(), unknown_type_, node_item_->num_outputs);
+    return SUCCESS;
+  }
+
+  output_summary_.resize(node_item_->num_outputs);
+  constexpr auto result_summary_size = sizeof(aicpu::FWKAdapter::ResultSummary);
+  for (auto i = 0; i < node_item_->num_outputs; ++i) {
+    GE_CHK_STATUS_RET(AllocTensorBuffer(result_summary_size, output_summary_[i]),
+                      "[Alloc][TensorBuffer] failed for Node[%s] to copy result summary info, size=%zu.",
+                      node_name_.c_str(), result_summary_size);
+  }
+  output_summary_host_.resize(node_item_->num_outputs);
+
+  // init for mem copy task
+  // the copy task copies output_data and output_shape, so the max len is 2 * output_num
+  const size_t copy_input_buf_len = node_item_->num_outputs * 2 * sizeof(uint64_t);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_release_flag_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input release_flag, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_data_size_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input data_size, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_src_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input src, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+  GE_CHK_STATUS_RET(AllocTensorBuffer(copy_input_buf_len, copy_input_dst_dev_),
+                    "[Alloc][TensorBuffer] failed for Node[%s] to copy task input dst, size=%zu",
+                    node_name_.c_str(), copy_input_buf_len);
+
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_release_flag_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_data_size_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_src_dev_->GetData()));
+  copy_io_addr_.emplace_back(reinterpret_cast<uintptr_t>(copy_input_dst_dev_->GetData()));
+  return SUCCESS;
+}

Status AicpuNodeTask::Init(const HybridModel &model) {
auto node_name = node_name_;
GELOGD("Node[%s] init start.", node_name.c_str());

-  GE_CHK_BOOL_RET_STATUS(unknown_type_ != DEPEND_COMPUTE, FAILED,
-                         "[Check][Type]Node[%s] unknown type[%d] is depend compute, it's not supported now.",
-                         node_name.c_str(), unknown_type_);

GE_CHK_BOOL_RET_STATUS(task_def_.has_kernel(), FAILED,
                       "[Check][task_def_]Node[%s] task def does not have kernel.", node_name.c_str());
auto &kernel_def = task_def_.kernel();
@@ -877,7 +1002,9 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
GELOGD("Get op:%s attribute(is_blocking_op), value:%d", op_desc->GetName().c_str(), is_blocking_aicpu_op_);
GE_CHK_STATUS_RET(InitExtInfo(kernel_ext_info, ext_session_id),
"[Init][ExtInfo] failed for Node[%s].", node_name.c_str());

+  GE_CHK_STATUS_RET(InitForDependComputeTask(),
+                    "[Init][DependComputeTask] failed for Node[%s].",
+                    node_name_.c_str());
if (ext_info_addr_dev_ == nullptr) {
aicpu_param_head->extInfoLength = 0;
aicpu_param_head->extInfoAddr = 0;
@@ -885,7 +1012,11 @@ Status AicpuNodeTask::Init(const HybridModel &model) {
aicpu_param_head->extInfoLength = ext_info_addr_dev_->GetSize();
aicpu_param_head->extInfoAddr = reinterpret_cast<uintptr_t>(ext_info_addr_dev_->GetData());
}

+  auto task_defs = model.GetTaskDefs(node_item_->node);
+  GE_CHECK_NOTNULL(task_defs);
+  if (unknown_type_ == DEPEND_COMPUTE) {
+    GE_CHK_STATUS_RET_NOLOG(SetMemCopyTask((*task_defs)[1]));
+  }
GELOGD("Node[%s] init end.", node_name.c_str());
return SUCCESS;
}
@@ -900,14 +1031,29 @@ Status AicpuNodeTask::UpdateIoAddr(TaskContext &context) {
GELOGD("Node[%s] input[%d] = %p, size = %zu", node_name_.c_str(), i, inputData->GetData(), inputData->GetSize());
io_addrs.emplace_back(reinterpret_cast<uintptr_t>(inputData->GetData()));
}
-  GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
-  for (auto j = 0; j < node_item_->num_outputs; ++j) {
-    auto outputData = context.GetOutput(j);
-    GE_CHECK_NOTNULL(outputData);
-    GELOGD("Node[%s] output[%d] addr = %p, size = %zu", node_name_.c_str(), j,
-           outputData->GetData(), outputData->GetSize());
-    io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
-  }
+  // known shape or not depend compute
+  if (!node_item_->is_dynamic || unknown_type_ != DEPEND_COMPUTE) {
+    // unknown type 4 does this in the callback.
+    GE_CHK_STATUS_RET_NOLOG(context.AllocateOutputs());
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      auto outputData = context.GetOutput(j);
+      GE_CHECK_NOTNULL(outputData);
+      GELOGD("Node[%s] output[%d] addr = %p, size = %zu",
+             node_name_.c_str(), j, outputData->GetData(), outputData->GetSize());
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(outputData->GetData()));
+    }
+  } else {
+    // unknown type 4 uses the result summary to update the io addr.
+    GELOGD("Node[%s] is depend compute node, use result summary as out addr.", node_name_.c_str());
+    GE_CHK_BOOL_RET_STATUS(output_summary_.size() == static_cast<std::size_t>(node_item_->num_outputs),
+                           INTERNAL_ERROR,
+                           "[Check][Size]Node[%s] has %d outputs but %zu output summaries, not equal.",
+                           node_name_.c_str(), node_item_->num_outputs, output_summary_.size());
+
+    for (auto j = 0; j < node_item_->num_outputs; ++j) {
+      void *summary_addr = output_summary_[j]->GetData();
+      io_addrs.emplace_back(reinterpret_cast<uintptr_t>(summary_addr));
+    }
+  }

auto io_addr = args_.get() + sizeof(aicpu::AicpuParamHead);
@@ -951,23 +1097,6 @@ Status AicpuNodeTask::LaunchTask(TaskContext &context) {
return SUCCESS;
}

-Status AicpuNodeTask::TaskCallback(TaskContext &context) {
-  GELOGD("Node[%s] task callback start, is_dynamic = %s, unknown_type=%d.",
-         node_name_.c_str(), node_item_->is_dynamic ? "true" : "false", unknown_type_);
-  Status callback_ret = SUCCESS;
-
-  // check need update shape, call update shape.
-  if (node_item_->is_dynamic && unknown_type_ == DEPEND_SHAPE_RANGE) {
-    // check result
-    callback_ret = UpdateOutputShapeFromExtInfo(context);
-  } else {
-    GELOGD("Node[%s] unknown shape type is %d no need update output shape.",
-           node_name_.c_str(), unknown_type_);
-  }
-  GELOGD("Node[%s] task callback end.", node_name_.c_str());
-  return callback_ret;
-}

Status AiCpuNodeExecutor::PrepareTask(NodeTask &task, TaskContext &context) const {
// malloc HBM memory at Init, here just update them
RECORD_EXECUTION_EVENT(context.GetExecutionContext(), context.GetNodeName(), "[AiCpuNodeExecutorPrepareTask] Start");
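
Taken together, the .cc changes are a hoist-to-base refactor: the DEPEND_COMPUTE pipeline (ReadResultSummaryAndPrepareMemory -> CopyDataToHbm -> UpdateShapeByHbmBuffer, driven by UpdateShapeAndDataByResultSummary) and TaskCallback move from AicpuTfNodeTask into AicpuNodeTaskBase, and each subclass keeps only its engine-specific copy launch. The sketch below illustrates that shape with simplified, hypothetical stand-in types; it is an outline of the pattern, not the actual GE declarations (those are in aicpu_node_executor.h).

#include <cstdint>
#include <memory>
#include <vector>

// Hypothetical stand-ins for ge::hybrid::TaskContext / ge::TensorBuffer.
struct TaskContext {};
struct TensorBuffer {};
using Status = uint32_t;
constexpr Status SUCCESS = 0U;

class AicpuTaskSketchBase {
 public:
  virtual ~AicpuTaskSketchBase() = default;

  // Shared DEPEND_COMPUTE flow, formerly private to AicpuTfNodeTask:
  // read result summaries, let the subclass copy shape/data to HBM,
  // then refresh output shapes from the copied buffers.
  Status UpdateShapeAndDataByResultSummary(TaskContext &context) {
    std::vector<std::unique_ptr<TensorBuffer>> out_shape_hbm;
    Status ret = ReadResultSummaryAndPrepareMemory(context, out_shape_hbm);
    if (ret == SUCCESS) {
      ret = CopyDataToHbm(context, out_shape_hbm);  // virtual: the engine-specific step
    }
    if (ret == SUCCESS) {
      ret = UpdateShapeByHbmBuffer(context, out_shape_hbm);
    }
    return ret;
  }

 protected:
  // The one step that differs per engine: the TF task replays a prebuilt
  // copy task, while the plain AICPU task launches a memcpy CPU kernel.
  virtual Status CopyDataToHbm(TaskContext &context,
                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) = 0;

  // Shared helpers; real bodies elided in this sketch.
  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
    (void)context; (void)out_shape_hbm;
    return SUCCESS;
  }
  Status UpdateShapeByHbmBuffer(TaskContext &context,
                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) {
    (void)context; (void)out_shape_hbm;
    return SUCCESS;
  }
};

One consequence visible in the hunks above: AicpuNodeTask::Init no longer rejects DEPEND_COMPUTE nodes; it calls InitForDependComputeTask() and registers the second TaskDef as the memcpy task instead.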


ge/hybrid/node_executor/aicpu/aicpu_node_executor.h (+49, -34)

@@ -55,11 +55,33 @@ class AicpuNodeTaskBase : public NodeTask {

virtual Status LaunchTask(TaskContext &context) = 0;

-  virtual Status TaskCallback(TaskContext &context) = 0;
+  virtual Status InitForDependComputeTask() = 0;
+
+  Status TaskCallback(TaskContext &context);
+
+  virtual Status UpdateShapeAndDataByResultSummary(TaskContext &context);

virtual Status UpdateIoAddr(TaskContext &context) = 0;

static Status AllocTensorBuffer(size_t size, std::unique_ptr<TensorBuffer> &tensor_buffer);
+  virtual Status CopyDataToHbm(TaskContext &context,
+                               const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) = 0;

+  ///
+  /// read result summary and prepare copy task memory.
+  /// @param context task context
+  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
+  /// @return SUCCESS:success other:failed
+  ///
+  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
+                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status UpdateShapeByHbmBuffer(TaskContext &context,
+                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+
+  Status PrepareCopyInputs(const TaskContext &context,
+                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);

Status DistributeWaitTaskForAicpuBlockingOp(rtStream_t stream);
Status CheckDeviceSupportBlockingAicpuOpProcess(bool &is_support);
@@ -83,6 +105,13 @@ class AicpuNodeTaskBase : public NodeTask {
// ext info addr, device mem
std::unique_ptr<TensorBuffer> ext_info_addr_dev_;

+  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
+  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
+
+  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
+  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
// for blocking aicpu op
bool is_blocking_aicpu_op_ = false;
rtEvent_t rt_event_ = nullptr;
@@ -101,33 +130,14 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {

Status LaunchTask(TaskContext &context) override;

-  Status TaskCallback(TaskContext &context) override;
-
   Status UpdateIoAddr(TaskContext &context) override;

- private:
-  Status SetMemCopyTask(const domi::TaskDef &task_def);
-
-  Status InitForDependComputeTask();
-
-  Status UpdateShapeAndDataByResultSummary(TaskContext &context);
-
-  ///
-  /// read result summary and prepare copy task memory.
-  /// @param context task context
-  /// @param out_shape_hbm if scalar, TensorBuffer->data is null, size=0
-  /// @return SUCCESS:success other:failed
-  ///
-  Status ReadResultSummaryAndPrepareMemory(TaskContext &context,
-                                           std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-  Status CopyDataToHbm(TaskContext &context,
-                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status UpdateShapeByHbmBuffer(TaskContext &context,
-                                const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
-
-  Status PrepareCopyInputs(const TaskContext &context,
-                           const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm);
+  Status InitForDependComputeTask() override;
+
+  Status CopyDataToHbm(TaskContext &context,
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;
+
+ private:
+  Status SetMemCopyTask(const domi::TaskDef &task_def);

static Status EnsureSessionCreated(uint64_t session_id);
static uint64_t GetStepIdAddr(const HybridModel &model);
@@ -142,16 +152,7 @@ class AicpuTfNodeTask : public AicpuNodeTaskBase {

// just used for depend DEPEND_COMPUTE op
std::unique_ptr<TensorBuffer> copy_task_args_buf_;

-  std::vector<std::unique_ptr<TensorBuffer>> output_summary_;
-  std::vector<aicpu::FWKAdapter::ResultSummary> output_summary_host_;
-
-  std::unique_ptr<TensorBuffer> copy_ioaddr_dev_;
-
-  std::unique_ptr<TensorBuffer> copy_input_release_flag_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_data_size_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_src_dev_;
-  std::unique_ptr<TensorBuffer> copy_input_dst_dev_;
bool need_sync_ = false;

std::unique_ptr<TensorBuffer> copy_workspace_buf_;
@@ -170,14 +171,28 @@ class AicpuNodeTask : public AicpuNodeTaskBase {

Status LaunchTask(TaskContext &context) override;

-  Status TaskCallback(TaskContext &context) override;
+  Status CopyDataToHbm(TaskContext &context,
+                       const std::vector<std::unique_ptr<TensorBuffer>> &out_shape_hbm) override;

   Status UpdateIoAddr(TaskContext &context) override;

+  Status InitForDependComputeTask() override;
+
+ private:
+  Status SetMemCopyTask(const domi::TaskDef &task_def);

protected:
// host mem
std::unique_ptr<uint8_t[]> args_;
+  // host memcpy mem
+  std::unique_ptr<uint8_t[]> memcpy_args_;
+
+  std::string memcpy_so_name_;
+
+  std::string memcpy_kernel_name_;
+  // memcpy args size
+  uint32_t memcpy_args_size_ = 0;
+
+  std::vector<uint64_t> copy_io_addr_;
// args size
uint32_t args_size_ = 0;
};
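
A note on the args buffer that SetMemCopyTask fills in: it is treated as a raw [AicpuParamHead | io addresses] layout. The head's ioAddrNum says how many uint64_t device addresses follow it, and the four copy_input_*_dev_ addresses gathered in copy_io_addr_ are copied into that tail with memcpy_s. Below is a minimal sketch of the packing, with a hypothetical stand-in for the head struct (the real definition lives in the aicpu headers):

#include <cstdint>
#include <cstring>
#include <memory>
#include <vector>

// Hypothetical stand-in for aicpu::AicpuParamHead; field names follow the diff.
struct AicpuParamHeadSketch {
  uint32_t length;     // total args size, head included
  uint32_t ioAddrNum;  // number of uint64_t io addresses after the head
};

// Pack [head | io_addr_0 .. io_addr_n-1], mirroring what SetMemCopyTask
// writes into memcpy_args_.
std::unique_ptr<uint8_t[]> PackMemcpyArgs(const std::vector<uint64_t> &io_addrs,
                                          uint32_t &args_size) {
  args_size = static_cast<uint32_t>(sizeof(AicpuParamHeadSketch) +
                                    io_addrs.size() * sizeof(uint64_t));
  std::unique_ptr<uint8_t[]> args(new (std::nothrow) uint8_t[args_size]());
  if (args == nullptr) {
    return nullptr;
  }
  auto *head = reinterpret_cast<AicpuParamHeadSketch *>(args.get());
  head->length = args_size;
  head->ioAddrNum = static_cast<uint32_t>(io_addrs.size());
  if (!io_addrs.empty()) {
    // The io addresses sit immediately after the head, as in the real task.
    std::memcpy(args.get() + sizeof(AicpuParamHeadSketch), io_addrs.data(),
                io_addrs.size() * sizeof(uint64_t));
  }
  return args;
}

In the commit there are exactly four such addresses (release_flag, data_size, src, dst), each pointing at a device buffer of 2 * num_outputs uint64_t slots, because the copy task moves both the output data and the output shape for every output.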

