
Eliminate data_op_list_

tags/v1.2.0
zhangxiaokun committed 4 years ago
commit 6ce14620cc
11 changed files with 539 additions and 362 deletions
  1. +1   -38   ge/executor/ge_executor.cc
  2. +2   -26   ge/graph/execute/graph_execute.cc
  3. +1   -5    ge/graph/execute/graph_execute.h
  4. +171 -150  ge/graph/load/new_model_manager/davinci_model.cc
  5. +23  -32   ge/graph/load/new_model_manager/davinci_model.h
  6. +0   -75   ge/graph/load/new_model_manager/davinci_model_parser.cc
  7. +2   -12   ge/graph/load/new_model_manager/model_manager.cc
  8. +1   -15   ge/graph/load/new_model_manager/model_manager.h
  9. +0   -3    inc/framework/executor/ge_executor.h
  10. +338 -5   tests/ut/ge/graph/load/davinci_model_unittest.cc
  11. +0   -1   tests/ut/ge/graph/load/kernel_task_info_unittest.cc

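Most of the changes below follow one pattern: instead of keeping a copied data_op_list_ vector of Data OpDescs alive after load and re-parsing it on every query, the per-input results are computed once by the new Init* helpers (driven from GenInputOutputInfo) and stored in maps keyed by the Data node index, so the public getters become simple const lookups. A minimal, self-contained sketch of that pattern follows; the types and names here are simplified stand-ins, not the actual GE classes:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// Simplified stand-ins for the GE types (AippConfigInfo, Status, ...); not the real classes.
enum Status { SUCCESS = 0, AIPP_NOT_EXIST = 1 };
struct AippInfo { std::string mode; };

class Model {
 public:
  // Called once per Data node while the graph is loaded; replaces keeping the OpDesc around.
  void InitAippInfo(uint32_t index, const std::string &mode) {
    if (mode.empty()) { return; }  // no AIPP attached to this input: store nothing
    aipp_info_list_[index] = AippInfo{mode};
  }

  // Query path: a const map lookup instead of re-parsing a cached OpDesc on every call.
  Status GetAippInfo(uint32_t index, AippInfo &info) const {
    const auto it = aipp_info_list_.find(index);
    if (it == aipp_info_list_.end()) { return AIPP_NOT_EXIST; }
    info = it->second;
    return SUCCESS;
  }

 private:
  std::map<uint32_t, AippInfo> aipp_info_list_;  // analogous to DavinciModel::aipp_info_list_
};

int main() {
  Model model;
  model.InitAippInfo(0, "static_aipp");
  AippInfo info;
  std::cout << model.GetAippInfo(0, info) << " " << info.mode << std::endl;  // 0 static_aipp
  std::cout << model.GetAippInfo(1, info) << std::endl;                      // 1 (not exist)
  return 0;
}
```

In the davinci_model.cc hunks further down, the same shape appears four times: InitAippInfo/GetAippInfo, InitAippType/GetAippType, InitOrigInputInfo/GetOrigInputInfo, and InitAippInputOutputDims/GetAllAippInputOutputDims.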
+1 -38  ge/executor/ge_executor.cc

@@ -676,7 +676,7 @@ Status GeExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "not inited yet!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}
Status ret = GraphExecutor::GetAIPPInfo(model_id, index, aipp_info);
Status ret = GraphExecutor::GetAippInfo(model_id, index, aipp_info);
if (ret != SUCCESS) {
GELOGW("GetAIPPInfo is not success.");
return ret;
@@ -713,43 +713,6 @@ Status GeExecutor::GetModelAttr(uint32_t model_id, std::vector<std::string> &dyn
return SUCCESS;
}

Status GeExecutor::GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector<ge::TensorDesc> &input_desc,
std::vector<TensorDesc> &output_desc) {
GELOGI("get model desc info for zero copy begin.");
if (!isInit_) {
GELOGE(ACL_ERROR_GE_EXEC_NOT_INIT, "GeExecutor has not been initialized!");
return ACL_ERROR_GE_EXEC_NOT_INIT;
}

std::vector<InputOutputDescInfo> input_desc_infos;
std::vector<InputOutputDescInfo> output_desc_infos;
std::vector<uint32_t> input_formats;
std::vector<uint32_t> output_formats;

Status ret = GraphExecutor::GetInputOutputDescInfoForZeroCopy(model_id, input_desc_infos, output_desc_infos,
input_formats, output_formats);
if (ret != domi::SUCCESS) {
GELOGE(ret, "Get DescInfo from zero copy failed. ret = %u", ret);
return ACL_ERROR_GE_GET_TENSOR_INFO;
}

if (input_formats.size() != input_desc_infos.size()) {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "input_formats.size() != input_desc_infos.size().");
return ACL_ERROR_GE_PARAM_INVALID;
}

if (output_formats.size() != output_desc_infos.size()) {
GELOGE(ACL_ERROR_GE_PARAM_INVALID, "output_formats.size() != output_desc_infos.size().");
return ACL_ERROR_GE_PARAM_INVALID;
}

GetGeTensorDescFromDomiInfo(input_desc, input_desc_infos, input_formats);
GetGeTensorDescFromDomiInfo(output_desc, output_desc_infos, output_formats);

GELOGI("get model desc info from zero copy end.");
return ge::SUCCESS;
}

Status GeExecutor::CommandHandle(const Command &command) {
Status ret = GraphLoader::CommandHandle(command);
if (ret != SUCCESS) {


+2 -26  ge/graph/execute/graph_execute.cc

@@ -560,34 +560,10 @@ Status GraphExecutor::GetModelAttr(uint32_t model_id, std::vector<string> &dynam
return SUCCESS;
}

Status GraphExecutor::GetInputOutputDescInfoForZeroCopy(uint32_t model_id, vector<InputOutputDescInfo> &input_desc,
vector<InputOutputDescInfo> &output_desc,
std::vector<uint32_t> &input_formats,
std::vector<uint32_t> &out_formats) {
try {
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
Status ret =
model_manager->GetInputOutputDescInfoForZeroCopy(model_id, input_desc, output_desc, input_formats, out_formats);
if (ret != SUCCESS) {
GELOGE(ret, "GetInputOutputDescInfoForZeroCopy failed.");
return ret;
}
} catch (std::bad_alloc &) {
GELOGE(MEMALLOC_FAILED, "GetInputOutputDescInfoForZeroCopy failed, bad memory allocation occur !");
return MEMALLOC_FAILED;
} catch (...) {
GELOGE(FAILED, "GetInputOutputDescInfoForZeroCopy failed, some exceptions occur !");
return FAILED;
}

return SUCCESS;
}

Status GraphExecutor::GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
Status GraphExecutor::GetAippInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
auto model_manager = ge::ModelManager::GetInstance();
GE_CHECK_NOTNULL(model_manager);
Status ret = model_manager->GetAIPPInfo(model_id, index, aipp_info);
Status ret = model_manager->GetAippInfo(model_id, index, aipp_info);
if (ret != SUCCESS) {
GELOGW("GetAIPPInfo is not success.");
return ret;


+1 -5  ge/graph/execute/graph_execute.h

@@ -73,7 +73,7 @@ class GraphExecutor {
vector<InputOutputDescInfo> &output_desc, std::vector<uint32_t> &input_formats,
std::vector<uint32_t> &output_formats, bool new_model_desc = false);

static Status GetAIPPInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info);
static Status GetAippInfo(uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info);

static Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index);

@@ -110,10 +110,6 @@ class GraphExecutor {

static Status GetModelAttr(uint32_t model_id, std::vector<string> &dynamic_output_shape_info);

static Status GetInputOutputDescInfoForZeroCopy(uint32_t model_id, vector<InputOutputDescInfo> &input_desc,
vector<InputOutputDescInfo> &output_desc,
std::vector<uint32_t> &input_formats,
std::vector<uint32_t> &output_formats);
static Status GetOrigInputInfo(uint32_t model_id, uint32_t index, OriginInputInfo &orig_input_info);
static Status GetAllAippInputOutputDims(uint32_t model_id, uint32_t index, std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims);


+171 -150  ge/graph/load/new_model_manager/davinci_model.cc

@@ -156,7 +156,6 @@ DavinciModel::~DavinciModel() {
GE_CHK_STATUS(ModelRunStop());

op_list_.clear();
data_op_list_.clear();
tensor_name_to_fixed_addr_size_.clear();
tensor_name_to_peer_output_index_.clear();
GE_DELETE_NEW_SINGLE(data_inputer_);
@@ -878,7 +877,7 @@ Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) {
auto it = op_desc_handle.find(op_desc->GetType());
if (it != op_desc_handle.end()) {
if ((this->*it->second)(op_desc) != SUCCESS) {
GELOGE(PARAM_INVALID, "NetOutput init failed, Name: %s", op_desc->GetName().c_str());
GELOGE(PARAM_INVALID, "Node init failed, Name: %s", op_desc->GetName().c_str());
return PARAM_INVALID;
}
continue;
@@ -931,7 +930,7 @@ Status DavinciModel::InitNodes(const ComputeGraphPtr &compute_graph) {

GE_TIMESTAMP_CALLNUM_END(LoadTBEKernelBinToOpDesc, "GraphLoader::LoadTBEKernelBinToOpDesc.");
GE_TIMESTAMP_CALLNUM_END(InitTbeHandle, "GraphLoader::InitTbeHandle.");
return OptInputOutputInfo(data_by_index, output_op_list);
return GenInputOutputInfo(data_by_index, output_op_list);
}

void DavinciModel::SetLabelForDynamic(const NodePtr &node) {
@@ -974,9 +973,6 @@ Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &nod
}

data_by_index[data_index] = op_desc;
auto data_op = AttrUtils::CopyOpDesc(op_desc);
GE_CHECK_NOTNULL(data_op);
data_op_list_.push_back(data_op);
if (known_node_) {
return SUCCESS;
}
@@ -1022,23 +1018,18 @@ Status DavinciModel::InitDataOp(const ComputeGraphPtr &graph, const NodePtr &nod
/// @param [in] output_op_list: list of NetOutput op.
/// @return Status
///
Status DavinciModel::OptInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_index,
Status DavinciModel::GenInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_index,
const vector<OpDescPtr> &output_op_list) {
GELOGD("Data node size: %zu, NetOutput node size: %zu", data_op_list_.size(), output_op_list.size());
if (data_by_index.size() != data_op_list_.size()) {
GELOGE(INTERNAL_ERROR, "Data map size: %zu, Data list size: %zu.", data_by_index.size(), data_op_list_.size());
return INTERNAL_ERROR;
}

data_op_list_.clear();
GELOGD("Data node size: %zu, NetOutput node size: %zu", data_by_index.size(), output_op_list.size());
for (auto &item : data_by_index) {
auto data_op = AttrUtils::CopyOpDesc(item.second);
GE_CHECK_NOTNULL(data_op);
data_op_list_.emplace_back(data_op);
auto output_addrs = ModelUtils::GetOutputDataAddrs(runtime_param_, item.second);
GELOGD("Data node: %s, output addr size: %zu", item.second->GetName().c_str(), output_addrs.size());
input_addrs_list_.emplace_back(output_addrs);

GE_CHK_STATUS_RET(InitAippInfo(item.first, item.second), "Init AIPP Info failed");
GE_CHK_STATUS_RET(InitAippType(item.first, item.second, data_by_index), "Init AIPP Type failed");
GE_CHK_STATUS_RET(InitOrigInputInfo(item.first, item.second), "Init Orig input failed");
GE_CHK_STATUS_RET(InitAippInputOutputDims(item.first, item.second), "Init AIPP dims failed");
if (item.second->GetType() == AIPP_DATA_TYPE) {
GELOGI("This is dynamic aipp model, Node: %s", item.second->GetName().c_str());
is_dynamic_aipp_ = true;
@@ -1066,7 +1057,8 @@ Status DavinciModel::OptInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_
}
}

return InitOutputDescInfo(output_op_list, output_descs_, output_formats_);
GE_CHK_STATUS_RET(InitInputDescInfo(data_by_index), "Init input desc info failed");
return InitOutputDescInfo(output_op_list);
}

bool DavinciModel::IsGetNextSinkDynamic(const OpDescPtr &op_desc) {
@@ -1791,73 +1783,101 @@ void DavinciModel::GetUserDesignateShapeOrder(std::vector<std::string> &user_inp
/// @ingroup ge
/// @brief Get AIPP input info
/// @param [in] index
/// @param [out] aipp_info
/// @param [in] OpDescPtr
/// @return execute result
///
Status DavinciModel::GetAIPPInfo(uint32_t index, AippConfigInfo &aipp_info) {
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP)) {
GELOGW("GetAIPPInfo: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
Status DavinciModel::InitAippInfo(uint32_t index, const OpDescPtr &op_desc) {
if (!op_desc->HasAttr(ATTR_NAME_AIPP)) {
GELOGW("there is not AIPP related with index %u.", index);
return SUCCESS;
}

std::unique_ptr<domi::AippOpParams> aipp_params(new (std::nothrow) domi::AippOpParams());
GE_CHECK_NOTNULL(aipp_params);

ge::GeAttrValue::NAMED_ATTRS aipp_attr;
GE_CHK_BOOL_RET_STATUS(AttrUtils::GetNamedAttrs(data_op, ATTR_NAME_AIPP, aipp_attr), GE_AIPP_NOT_EXIST,
domi::AippOpParams aipp_params;
GeAttrValue::NAMED_ATTRS aipp_attr;
GE_CHK_BOOL_RET_STATUS(AttrUtils::GetNamedAttrs(op_desc, ATTR_NAME_AIPP, aipp_attr), GE_AIPP_NOT_EXIST,
"Data node do not contain param aipp!");
GE_CHK_STATUS_RET(OpUtils::ConvertAippParams(aipp_attr, aipp_params.get()), "get aipp params failed");
GELOGI("GetAIPPInfo: node data: %s, type: %s, current index: %u, current node related input rank: %u",
data_op->GetName().c_str(), data_op->GetType().c_str(), index, aipp_params->related_input_rank());
GE_CHK_STATUS_RET(OpUtils::ConvertAippParams(aipp_attr, &aipp_params), "get aipp params failed");
GELOGI("node data: %s, type: %s, current index: %u, current node related input rank: %u",
op_desc->GetName().c_str(), op_desc->GetType().c_str(), index, aipp_params.related_input_rank());

GE_CHK_STATUS_RET(AippUtils::ConvertAippParams2AippInfo(aipp_params.get(), aipp_info),
AippConfigInfo aipp_info;
GE_CHK_STATUS_RET(AippUtils::ConvertAippParams2AippInfo(&aipp_params, aipp_info),
"convert aipp params to aipp config info failed");

aipp_info_list_[index] = aipp_info;
return SUCCESS;
}

Status DavinciModel::GetAippType(uint32_t index, InputAippType &type, size_t &aipp_index) {
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
// Set default value
type = DATA_WITHOUT_AIPP;
aipp_index = 0xFFFFFFFF; // default invalid value
OpDescPtr data_op = data_op_list_[index];
GE_CHECK_NOTNULL(data_op);
if (!data_op->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) {
///
/// @ingroup ge
/// @brief Get AIPP input info
/// @param [in] index
/// @param [out] aipp_info
/// @return execute result
///
Status DavinciModel::GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) const {
const auto it = aipp_info_list_.find(index);
if (it == aipp_info_list_.end()) {
GELOGW("there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

aipp_info = it->second;
return SUCCESS;
}

Status DavinciModel::InitAippType(uint32_t index, const OpDescPtr &op_desc, const map<uint32_t, OpDescPtr> &data_list) {
if (!op_desc->HasAttr(ATTR_DATA_RELATED_AIPP_MODE)) {
GELOGW("There is no aipp releated info with index %u.", index);
return SUCCESS;
}
std::string data_mode;
(void)AttrUtils::GetStr(data_op, ATTR_DATA_RELATED_AIPP_MODE, data_mode);

// Set default value
InputAippType aipp_type = DATA_WITHOUT_AIPP;
string data_mode;
(void)AttrUtils::GetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, data_mode);
if (data_mode == "static_aipp") {
type = DATA_WITH_STATIC_AIPP;
aipp_type = DATA_WITH_STATIC_AIPP;
} else if (data_mode == "dynamic_aipp") {
type = DATA_WITH_DYNAMIC_AIPP;
aipp_type = DATA_WITH_DYNAMIC_AIPP;
} else if (data_mode == "dynamic_aipp_conf") {
type = DYNAMIC_AIPP_NODE;
aipp_type = DYNAMIC_AIPP_NODE;
} else {
GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID,
"The info of aipp releated info %s is invalid with index %u.", data_mode.c_str(), index);
return ACL_ERROR_GE_AIPP_MODE_INVALID;
}

if (type == DATA_WITH_DYNAMIC_AIPP) {
size_t aipp_index = 0xFFFFFFFF; // default invalid value
if (aipp_type == DATA_WITH_DYNAMIC_AIPP) {
string releated_name;
(void)AttrUtils::GetStr(data_op, ATTR_DATA_AIPP_DATA_NAME_MAP, releated_name);
for (size_t i = 0; i < data_op_list_.size(); ++i) {
GE_CHECK_NOTNULL(data_op_list_[i]);
if (data_op_list_[i]->GetName() == releated_name) {
GELOGI("Find aipp_data [%s] index %zu from index %u", releated_name.c_str(), i, index);
aipp_index = i;
(void)AttrUtils::GetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, releated_name);
for (const auto &item : data_list) {
if (item.second->GetName() == releated_name) {
GELOGI("Find aipp_data [%s] index %zu from index %u", releated_name.c_str(), item.first, index);
aipp_index = item.first;
}
}

if (aipp_index == 0xFFFFFFFF) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "Can not find aipp data node from index %u", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
GELOGW("Can not find aipp data node from index %u", index);
return SUCCESS;
}
}

aipp_type_list_[index] = { aipp_type, aipp_index };
return SUCCESS;
}

Status DavinciModel::GetAippType(uint32_t index, InputAippType &aipp_type, size_t &aipp_index) const {
const auto it = aipp_type_list_.find(index);
if (it == aipp_type_list_.end()) {
GELOGW("There is no aipp releated info with index %u.", index);
return SUCCESS;
}

aipp_type = it->second.first;
aipp_index = it->second.second;
return SUCCESS;
}

@@ -1873,7 +1893,7 @@ void DavinciModel::SetDynamicSize(const std::vector<uint64_t> &batch_num, int32_
dynamic_type_ = dynamic_type;
}

void DavinciModel::GetCurShape(std::vector<int64_t> &batch_info, int32_t &dynamic_type) {
void DavinciModel::GetCurShape(std::vector<int64_t> &batch_info, int32_t &dynamic_type) const {
if (batch_size_.empty()) {
GELOGD("User does not set dynamic size");
}
@@ -1885,38 +1905,10 @@ void DavinciModel::GetCurShape(std::vector<int64_t> &batch_info, int32_t &dynami
dynamic_type = dynamic_type_;
}

void DavinciModel::GetModelAttr(vector<string> &out_shape_info) {
void DavinciModel::GetModelAttr(vector<string> &out_shape_info) const {
out_shape_info.insert(out_shape_info.end(), dynamic_output_shape_info_.begin(), dynamic_output_shape_info_.end());
}

Status DavinciModel::GetInputOutputDescInfoForZeroCopy(vector<InputOutputDescInfo> &input_desc,
vector<InputOutputDescInfo> &output_desc,
std::vector<uint32_t> &input_formats,
std::vector<uint32_t> &output_formats) {
if (input_addrs_list_.empty() || input_addrs_list_[0].size() != kOutputNum) {
GELOGE(FAILED, "OP List Pointer is null or input_desc size is not 1!");
return FAILED;
}

GE_CHK_STATUS_RET(GetInputDescInfo(input_desc, input_formats), "get input desc info failed");

GE_CHK_STATUS_RET(GetOutputDescInfo(output_desc, output_formats), "get ouput desc info failed");

GE_CHK_BOOL_RET_STATUS(output_desc.size() == output_memory_size_list_.size(), INTERNAL_ERROR,
"output_desc size[%zu] not equal output_size_list_[%zu] size!", output_desc.size(),
output_memory_size_list_.size());

/// For function zero copy,the momery should be aligned by 512 bytes.
/// And, because of the cce op limit, size should be lager than the real shape size. The memory should be padded by 32
/// bytes.
/// *size equals to ((tensorDesc->dataSize + 2 * 32 - 1) / 32) * 32;
for (size_t i = 0; i < output_memory_size_list_.size(); i++) {
output_desc[i].size = output_memory_size_list_[i];
}

return SUCCESS;
}

void DavinciModel::SetInputDimsInfo(const vector<int64_t> &model_input_dims, Format &format,
InputOutputDescInfo &input) {
uint32_t n, c, h, w;
@@ -1966,24 +1958,30 @@ void DavinciModel::CreateInputDimsInfo(const OpDescPtr &op_desc, Format format,
}
}

Status DavinciModel::GetInputDescInfo(vector<InputOutputDescInfo> &input_desc, std::vector<uint32_t> &formats) {
for (size_t index = 0; index < data_op_list_.size(); ++index) {
InputOutputDescInfo input;
GE_CHECK_NOTNULL(data_op_list_[index]);
GE_CHECK_NOTNULL(data_op_list_[index]->GetInputDescPtr(0));
Status DavinciModel::InitInputDescInfo(const map<uint32_t, OpDescPtr> &data_by_index) {
for (const auto &item : data_by_index) {
const auto op_desc = item.second;
GE_CHECK_NOTNULL(op_desc->GetInputDescPtr(0));

Format format = data_op_list_[index]->GetInputDescPtr(0)->GetFormat();
CreateInputDimsInfo(data_op_list_[index], format, input);
InputOutputDescInfo input;
Format format = op_desc->GetInputDescPtr(0)->GetFormat();
CreateInputDimsInfo(op_desc, format, input);

input.data_type = data_op_list_[index]->GetInputDescPtr(0)->GetDataType();
input.name = data_op_list_[index]->GetName();
input.data_type = op_desc->GetInputDescPtr(0)->GetDataType();
input.name = op_desc->GetName();
int64_t input_size = 0;
GE_CHK_STATUS_RET(TensorUtils::GetSize(*data_op_list_[index]->GetInputDescPtr(0), input_size),
"get input size failed.");
GE_CHK_STATUS_RET(TensorUtils::GetSize(*op_desc->GetInputDescPtr(0), input_size), "get input size failed.");
input.size = input_size;
formats.push_back(format);
input_desc.push_back(input);
input_formats_.push_back(format);
input_descs_.push_back(input);
}
return SUCCESS;
}

Status DavinciModel::GetInputDescInfo(vector<InputOutputDescInfo> &input_descs, vector<uint32_t> &input_formats) {
input_descs.insert(input_descs.end(), input_descs_.begin(), input_descs_.end());
input_formats.insert(input_formats.end(), input_formats_.begin(), input_formats_.end());

// cause GetInputDescInfo called not only once, set is_new_model_desc_ to false after calc the model input dims
is_new_model_desc_ = false;
return SUCCESS;
@@ -2042,8 +2040,7 @@ void DavinciModel::CreateOutput(uint32_t index, const OpDescPtr &op_desc, InputO
output.data_type = op_desc->GetInputDescPtr(index)->GetDataType();
}

Status DavinciModel::InitOutputDescInfo(const vector<OpDescPtr> &output_op_list,
vector<InputOutputDescInfo> &output_descs, vector<uint32_t> &output_formats) {
Status DavinciModel::InitOutputDescInfo(const vector<OpDescPtr> &output_op_list) {
GELOGD("Output node size: %zu", output_op_list.size());
for (const auto &op_desc : output_op_list) {
uint32_t out_size = static_cast<uint32_t>(op_desc->GetInputsSize());
@@ -2068,28 +2065,20 @@ Status DavinciModel::InitOutputDescInfo(const vector<OpDescPtr> &output_op_list,
std::to_string(src_index[index]);
}
output.name = output_name;
output_descs.push_back(output);
output_formats.push_back(format_result);
output_descs_.push_back(output);
output_formats_.push_back(format_result);
}
}
return SUCCESS;
}

Status DavinciModel::GetOutputDescInfo(vector<InputOutputDescInfo> &output_descs, vector<uint32_t> &output_formats) {
Status DavinciModel::GetOutputDescInfo(vector<InputOutputDescInfo> &output_descs,
vector<uint32_t> &output_formats) const {
output_descs.insert(output_descs.end(), output_descs_.begin(), output_descs_.end());
output_formats.insert(output_formats.end(), output_formats_.begin(), output_formats_.end());
return SUCCESS;
}

ge::Format DavinciModel::GetFormat() {
if ((data_op_list_.empty()) || data_op_list_[0] == nullptr || data_op_list_[0]->GetInputDescPtr(0) == nullptr) {
GELOGW("OP List Pointer is null or input_desc size is not 1!");
return FORMAT_NCHW;
}

return data_op_list_[0]->GetInputDescPtr(0)->GetFormat();
}

Status DavinciModel::CopyInputData(const InputData &input_data, bool device_data) {
rtMemcpyKind_t kind = device_data ? RT_MEMCPY_DEVICE_TO_DEVICE : RT_MEMCPY_HOST_TO_DEVICE;
const std::vector<DataBuffer> &blobs = input_data.blobs;
@@ -4004,25 +3993,45 @@ void DavinciModel::SetTotalFixedAddrsSize(string tensor_name, int64_t fix_addr_s
}
}

Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) {
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetOrigInputInfo: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
Status DavinciModel::InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc) {
if (!op_desc->HasAttr(ATTR_NAME_AIPP_INPUTS) || !op_desc->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGI("there is not AIPP related with index %u, node: %s.", index, op_desc->GetName().c_str());
return SUCCESS;
}

vector<std::string> inputs;
if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
vector<string> inputs;
if (AttrUtils::GetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
std::string input = inputs[kAippOriginInputIndex];
GELOGI("GetOrigInputInfo: origin input str: %s", input.c_str());
GELOGI("origin input str: %s", input.c_str());
std::vector<std::string> infos = ge::StringUtils::Split(input, ':');
if (infos.size() != kAippInfoNum) {
GELOGW("origin input str is invalid.");
GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "origin input str is invalid[%zu, %u].", infos.size(), kAippInfoNum);
return ACL_ERROR_GE_AIPP_MODE_INVALID;
}
orig_input_info.format = TypeUtils::SerialStringToFormat(infos[kAippInfoFormat]);
orig_input_info.data_type = TypeUtils::SerialStringToDataType(infos[kAippInfoDataType]);
orig_input_info.dim_num = std::strtol(infos[kAippInfoDimNum].c_str(), nullptr, kDecimal);

OriginInputInfo input_info;
input_info.format = TypeUtils::SerialStringToFormat(infos[kAippInfoFormat]);
input_info.data_type = TypeUtils::SerialStringToDataType(infos[kAippInfoDataType]);
input_info.dim_num = std::strtol(infos[kAippInfoDimNum].c_str(), nullptr, kDecimal);
orig_input_info_[index] = input_info;
} else {
OriginInputInfo input_info = { FORMAT_RESERVED, DT_UNDEFINED, 0 };
orig_input_info_[index] = input_info;
}

return SUCCESS;
}

Status DavinciModel::GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) const {
const auto it = orig_input_info_.find(index);
if (it == orig_input_info_.end()) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

const OriginInputInfo &input_info = it->second;
if (input_info.format != FORMAT_RESERVED || input_info.data_type != DT_UNDEFINED) {
orig_input_info = input_info;
}

return SUCCESS;
@@ -4032,7 +4041,8 @@ void DavinciModel::ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_
GELOGI("ParseAIPPInfo: origin str: %s", in_out_info.c_str());
std::vector<std::string> infos = ge::StringUtils::Split(in_out_info, ':');
if (infos.size() != kAippInfoNum) {
GELOGW("origin input str is invalid.");
GELOGE(ACL_ERROR_GE_AIPP_MODE_INVALID, "origin input str is invalid[%zu, %u].", infos.size(), kAippInfoNum);
return;
}
dims_info.name = infos[kAippInfoTensorName];
dims_info.size = std::strtol(infos[kAippInfoTensorSize].c_str(), nullptr, kDecimal);
@@ -4047,47 +4057,58 @@ void DavinciModel::ParseAIPPInfo(std::string in_out_info, InputOutputDims &dims_
}
}

Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, std::vector<InputOutputDims> &input_dims,
std::vector<InputOutputDims> &output_dims) {
GE_CHK_BOOL_RET_STATUS(index < data_op_list_.size(), PARAM_INVALID, "Index %u is invalid.", index);
OpDescPtr data_op = data_op_list_[index];
if (!data_op->HasAttr(ATTR_NAME_AIPP_INPUTS) || !data_op->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "GetAllAippInputOutputDims: there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
Status DavinciModel::InitAippInputOutputDims(uint32_t index, const OpDescPtr &op_desc) {
if (!op_desc->HasAttr(ATTR_NAME_AIPP_INPUTS) || !op_desc->HasAttr(ATTR_NAME_AIPP_OUTPUTS)) {
GELOGI("there is not AIPP related with index %u.", index);
return SUCCESS;
}

vector<std::string> inputs;
if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
GELOGI("GetAllAippInputOutputDims: Data: %s has %zu related aippInfo.", data_op->GetName().c_str(), inputs.size());
vector<string> inputs;
vector<InputOutputDims> input_dims;
if (AttrUtils::GetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs) && !inputs.empty()) {
GELOGI("Data: %s has %zu related aippInfo.", op_desc->GetName().c_str(), inputs.size());
for (auto it : inputs) {
InputOutputDims input_info;
ParseAIPPInfo(it, input_info);
input_dims.emplace_back(input_info);
GELOGD("GetAllAippInputOutputDims Aipp origin input dims info: %s", it.c_str());
GELOGD("Aipp origin input dims info: %s", it.c_str());

ConstGeTensorDescPtr data_input_desc = data_op->GetInputDescPtr(kDataIndex);
ConstGeTensorDescPtr data_input_desc = op_desc->GetInputDescPtr(kDataIndex);
int64_t data_input_size;
(void)TensorUtils::GetSize(*(data_op->GetInputDescPtr(kDataIndex)), data_input_size);
GELOGD(
"GetAllAippInputOutputDims related Data[%d]: tensor_name is %s, dim_num is %zu, tensor_size: %zu, format: "
"%s, data_type: %s, shape: %s .",
index, data_op->GetName().c_str(), data_input_desc->GetShape().GetDimNum(), data_input_size,
TypeUtils::FormatToSerialString(data_input_desc->GetFormat()).c_str(),
TypeUtils::DataTypeToSerialString(data_input_desc->GetDataType()).c_str(),
formats::JoinToString(data_input_desc->GetShape().GetDims()).c_str());
(void)TensorUtils::GetSize(*(op_desc->GetInputDescPtr(kDataIndex)), data_input_size);
GELOGD("related Data[%d]: tensor_name: %s, dim_num: %zu, tensor_size: %zu, format: %s, data_type: %s, shape: %s",
index, op_desc->GetName().c_str(), data_input_desc->GetShape().GetDimNum(), data_input_size,
TypeUtils::FormatToSerialString(data_input_desc->GetFormat()).c_str(),
TypeUtils::DataTypeToSerialString(data_input_desc->GetDataType()).c_str(),
formats::JoinToString(data_input_desc->GetShape().GetDims()).c_str());
}
}

vector<std::string> outputs;
if (AttrUtils::GetListStr(data_op, ATTR_NAME_AIPP_OUTPUTS, outputs) && !outputs.empty()) {
vector<string> outputs;
vector<InputOutputDims> output_dims;
if (AttrUtils::GetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs) && !outputs.empty()) {
for (auto it : outputs) {
InputOutputDims output_info;
ParseAIPPInfo(it, output_info);
output_dims.emplace_back(output_info);
GELOGD("GetAllAippInputOutputDims Aipp output dims info: %s", it.c_str());
GELOGD("Aipp output dims info: %s", it.c_str());
}
}

aipp_dims_info_[index] = { input_dims, output_dims };
return SUCCESS;
}

Status DavinciModel::GetAllAippInputOutputDims(uint32_t index, vector<InputOutputDims> &input_dims,
vector<InputOutputDims> &output_dims) const {
const auto it = aipp_dims_info_.find(index);
if (it == aipp_dims_info_.end()) {
GELOGE(ACL_ERROR_GE_AIPP_NOT_EXIST, "there is not AIPP related with index %u.", index);
return ACL_ERROR_GE_AIPP_NOT_EXIST;
}

input_dims = it->second.first;
output_dims = it->second.second;
return SUCCESS;
}
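Two details of the lookups added above are easy to miss: as written, GetAippType returns SUCCESS when an index has no stored entry and leaves the caller's variables untouched, while InitOrigInputInfo records a { FORMAT_RESERVED, DT_UNDEFINED, 0 } sentinel for Data nodes whose AIPP input/output attrs exist but are empty, which GetOrigInputInfo then reports as SUCCESS without overwriting the caller's struct. A small self-contained sketch of that miss and sentinel handling, again with stand-in types rather than the GE ones:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <utility>

enum Status { SUCCESS = 0, AIPP_NOT_EXIST = 1 };
enum AippType { DATA_WITHOUT_AIPP = 0, DATA_WITH_STATIC_AIPP = 1, DATA_WITH_DYNAMIC_AIPP = 2 };
// -1 stands in for FORMAT_RESERVED / DT_UNDEFINED of the real OriginInputInfo.
struct OrigInputInfo { int format = -1; int data_type = -1; uint32_t dim_num = 0; };

class Model {
 public:
  void InitAippType(uint32_t index, AippType type, size_t related_index) {
    aipp_type_list_[index] = {type, related_index};
  }
  // Miss: return SUCCESS and leave the caller's defaults in place (the real code also logs a warning).
  Status GetAippType(uint32_t index, AippType &type, size_t &aipp_index) const {
    const auto it = aipp_type_list_.find(index);
    if (it == aipp_type_list_.end()) { return SUCCESS; }
    type = it->second.first;
    aipp_index = it->second.second;
    return SUCCESS;
  }

  // Empty AIPP attrs are stored as a default-constructed sentinel entry.
  void InitOrigInputInfo(uint32_t index, const OrigInputInfo &info = {}) { orig_input_info_[index] = info; }
  Status GetOrigInputInfo(uint32_t index, OrigInputInfo &out) const {
    const auto it = orig_input_info_.find(index);
    if (it == orig_input_info_.end()) { return AIPP_NOT_EXIST; }  // never initialized: hard error
    if (it->second.format != -1 || it->second.data_type != -1) { out = it->second; }  // skip sentinel
    return SUCCESS;
  }

 private:
  std::map<uint32_t, std::pair<AippType, size_t>> aipp_type_list_;
  std::map<uint32_t, OrigInputInfo> orig_input_info_;
};

int main() {
  Model model;
  AippType type = DATA_WITHOUT_AIPP;  // caller-side default survives the miss
  size_t related = 0xFFFFFFFF;
  std::cout << model.GetAippType(0, type, related) << " " << type << std::endl;  // 0 0

  OrigInputInfo info;
  std::cout << model.GetOrigInputInfo(0, info) << std::endl;  // 1: no entry at all
  model.InitOrigInputInfo(0);                                 // empty-attr sentinel
  std::cout << model.GetOrigInputInfo(0, info) << std::endl;  // 0: success, info left untouched
  return 0;
}
```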



+23 -32  ge/graph/load/new_model_manager/davinci_model.h

@@ -286,13 +286,6 @@ class DavinciModel {
// Modified from KernelTaskInfo.
SuperKernelTaskInfo &GetSuperKernelTaskInfo() { return skt_info_; }

///
/// @ingroup ge
/// @brief get model input and output format
/// @return ccTensorFormat_t current model input and output format
///
Format GetFormat();

rtModel_t GetRtModelHandle() const { return rt_model_handle_; }

rtStream_t GetRtModelStream() const { return rt_model_stream_; }
@@ -326,7 +319,7 @@ class DavinciModel {
Status GetInputOutputDescInfo(vector<InputOutputDescInfo> &input_desc, vector<InputOutputDescInfo> &output_desc);

Status GetInputOutputDescInfo(vector<InputOutputDescInfo> &input_desc, vector<InputOutputDescInfo> &output_desc,
vector<uint32_t> &inputFormats, vector<uint32_t> &output_formats);
vector<uint32_t> &input_formats, vector<uint32_t> &output_formats);

///
/// @ingroup ge
@@ -347,9 +340,9 @@ class DavinciModel {

void GetUserDesignateShapeOrder(vector<string> &user_input_shape_order) const;

void GetCurShape(vector<int64_t> &batch_info, int32_t &dynamic_type);
void GetCurShape(vector<int64_t> &batch_info, int32_t &dynamic_type) const;

void GetModelAttr(vector<string> &dynamic_output_shape_info);
void GetModelAttr(vector<string> &dynamic_output_shape_info) const;

///
/// @ingroup ge
@@ -358,9 +351,9 @@ class DavinciModel {
/// @param [out] aipp_info
/// @return execute result
///
Status GetAIPPInfo(uint32_t index, AippConfigInfo &aipp_info);
Status GetAippInfo(uint32_t index, AippConfigInfo &aipp_info) const;

Status GetAippType(uint32_t index, InputAippType &type, size_t &aipp_index);
Status GetAippType(uint32_t index, InputAippType &type, size_t &aipp_index) const;

///
/// @ingroup ge
@@ -378,17 +371,6 @@ class DavinciModel {
///
void GetUniqueId(const OpDescPtr &op_desc, string &unique_identification);

///
/// @ingroup ge
/// @brief get model input and output desc for zero copy
/// @param [out] input_shape model input size
/// @param [out] output_shape model output size
/// @return execute result
///
Status GetInputOutputDescInfoForZeroCopy(vector<InputOutputDescInfo> &input_desc,
vector<InputOutputDescInfo> &output_desc,
vector<uint32_t> &inputFormats, vector<uint32_t> &output_formats);

Status ReturnResult(uint32_t data_id, const bool rslt_flg, const bool seq_end_flg, OutputData *output_data);

Status ReturnNoOutput(uint32_t data_id);
@@ -538,9 +520,9 @@ class DavinciModel {
Status UpdateKnownZeroCopyAddr(vector<void *> &total_io_addrs, bool update_args = true);
void SetKnownNodeAddrNotChanged(bool base_addr_not_changed) { base_addr_not_changed_ = base_addr_not_changed; }

Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info);
Status GetOrigInputInfo(uint32_t index, OriginInputInfo &orig_input_info) const;
Status GetAllAippInputOutputDims(uint32_t index, vector<InputOutputDims> &input_dims,
vector<InputOutputDims> &output_dims);
vector<InputOutputDims> &output_dims) const;
void SetModelDescVersion(bool is_new_model_desc) { is_new_model_desc_ = is_new_model_desc; }
// om file name
void SetOmName(string om_name) { om_name_ = om_name; }
@@ -626,7 +608,7 @@ class DavinciModel {
void SetInputDimsInfo(const vector<int64_t> &model_input_dims, Format &format, InputOutputDescInfo &input);

Status GetInputDescInfo(vector<InputOutputDescInfo> &input_desc, vector<uint32_t> &input_formats);
Status GetOutputDescInfo(vector<InputOutputDescInfo> &output_desc, vector<uint32_t> &output_formats);
Status GetOutputDescInfo(vector<InputOutputDescInfo> &output_desc, vector<uint32_t> &output_formats) const;

Status InitTaskInfo(domi::ModelTaskDef &modelTaskInfo);

@@ -688,7 +670,7 @@ class DavinciModel {
/// @param [in] output_op_list: list of NetOutput op.
/// @return Status
///
Status OptInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_index, const vector<OpDescPtr> &output_op_list);
Status GenInputOutputInfo(const map<uint32_t, OpDescPtr> &data_by_index, const vector<OpDescPtr> &output_op_list);

///
/// @ingroup ge
@@ -856,8 +838,13 @@ class DavinciModel {
Status InitOutputTensorInfo(const OpDescPtr &op_desc);
Status GenOutputTensorInfo(OutputData *output_data, vector<OutputTensorInfo> &outputs);

Status InitOutputDescInfo(const vector<OpDescPtr> &output_op_list,
vector<InputOutputDescInfo> &output_desc, vector<uint32_t> &formats);
Status InitInputDescInfo(const map<uint32_t, OpDescPtr> &data_by_index);
Status InitOutputDescInfo(const vector<OpDescPtr> &output_op_list);

Status InitOrigInputInfo(uint32_t index, const OpDescPtr &op_desc);
Status InitAippInfo(uint32_t index, const OpDescPtr &op_desc);
Status InitAippType(uint32_t index, const OpDescPtr &op_desc, const map<uint32_t, OpDescPtr> &data_list);
Status InitAippInputOutputDims(uint32_t index, const OpDescPtr &op_desc);

void ParseAIPPInfo(string in_out_info, InputOutputDims &dims_info);
void SetLabelForDynamic(const NodePtr &node);
@@ -890,9 +877,6 @@ class DavinciModel {

map<uint32_t, OpDescPtr> op_list_; // release after DavinciModel::Init

// data op_desc
vector<OpDescPtr> data_op_list_;

vector<OpDescPtr> variable_op_list_;

map<uint32_t, ZeroCopyOffset> new_input_data_info_;
@@ -1048,6 +1032,13 @@ class DavinciModel {
vector<int64_t> output_buffer_size_;
vector<GeShape> output_shape_info_;

map<uint32_t, OriginInputInfo> orig_input_info_;
map<uint32_t, AippConfigInfo> aipp_info_list_;
map<uint32_t, pair<InputAippType, size_t>> aipp_type_list_;
map<uint32_t, pair<vector<InputOutputDims>, vector<InputOutputDims>>> aipp_dims_info_;

vector<InputOutputDescInfo> input_descs_;
vector<uint32_t> input_formats_;
vector<InputOutputDescInfo> output_descs_;
vector<uint32_t> output_formats_;
};
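The cached vectors at the bottom of the class above (input_descs_, input_formats_, output_descs_, output_formats_) apply the same idea to the model-level descriptions: InitInputDescInfo and InitOutputDescInfo fill them once during load, and GetInputDescInfo / GetOutputDescInfo simply append the cached entries to the caller's vectors, so repeated queries no longer walk the Data OpDescs. A minimal sketch of that append-from-cache getter, with stand-in types rather than the real ones:

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for InputOutputDescInfo; the real struct also carries shape, data type, etc.
struct DescInfo { std::string name; int64_t size; };

class Model {
 public:
  // Filled once while the graph is loaded, one entry per Data node in index order.
  void InitInputDescInfo(const std::string &name, int64_t size, uint32_t format) {
    input_descs_.push_back({name, size});
    input_formats_.push_back(format);
  }
  // Query path appends the cached entries to whatever the caller already collected.
  void GetInputDescInfo(std::vector<DescInfo> &descs, std::vector<uint32_t> &formats) const {
    descs.insert(descs.end(), input_descs_.begin(), input_descs_.end());
    formats.insert(formats.end(), input_formats_.begin(), input_formats_.end());
  }

 private:
  std::vector<DescInfo> input_descs_;
  std::vector<uint32_t> input_formats_;
};

int main() {
  Model model;
  model.InitInputDescInfo("data", 512, 0 /* e.g. FORMAT_NCHW */);
  std::vector<DescInfo> descs;
  std::vector<uint32_t> formats;
  model.GetInputDescInfo(descs, formats);
  std::cout << descs.size() << " " << descs[0].name << " " << formats[0] << std::endl;  // 1 data 0
  return 0;
}
```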


+0 -75  ge/graph/load/new_model_manager/davinci_model_parser.cc

@@ -16,82 +16,7 @@

#include "graph/load/new_model_manager/davinci_model_parser.h"

#include <fstream>
#include <memory>
#include <vector>
#include "securec.h"

#include "common/debug/log.h"
#include "graph/load/new_model_manager/davinci_model.h"

namespace ge {
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelInfoParser(const ModelData &model, ModelInfo &model_info) {
GE_CHK_RT_RET(rtSetDevice(0));
try {
uint32_t model_len = 0;
uint8_t *model_data = nullptr;

Status ret = DavinciModelParser::ParseModelContent(model, model_data, model_len);

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, GE_CHK_RT(rtDeviceReset(0)); return ret, "Parse model failed");

auto *file_header = reinterpret_cast<ModelFileHeader *>(model.model_data);

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(file_header == nullptr, GE_CHK_RT(rtDeviceReset(0));
return PARAM_INVALID, "file_header is null.");

model_info.version = file_header->version;
model_info.is_encrypt = false;
GE_IF_BOOL_EXEC(ENCRYPTED == file_header->is_encrypt, model_info.is_encrypt = true);

std::shared_ptr<DavinciModel> davinci_model =
std::shared_ptr<DavinciModel>(new (std::nothrow) DavinciModel(model.priority, nullptr));

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(davinci_model == nullptr, GE_CHK_RT(rtDeviceReset(0));
return PARAM_INVALID, "davinci_model is null.");

GE_MAKE_GUARD(davinci_model, [&] { davinci_model = nullptr; });

ModelHelper model_helper;
ret = model_helper.LoadModel(model);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG((ret != SUCCESS), GE_CHK_RT(rtDeviceReset(0)); return FAILED, "load model failed");

ret = davinci_model->Assign(model_helper.GetGeModel());
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, GE_CHK_RT(rtDeviceReset(0));
return ret, "Parse davinci model data failed");

ret = davinci_model->Init();

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, GE_CHK_RT(rtDeviceReset(0));
return ret, "Davinci model init failed");

vector<InputOutputDescInfo> input_list;
vector<InputOutputDescInfo> output_list;

ret = davinci_model->GetInputOutputDescInfo(input_list, output_list);

GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(ret != SUCCESS, GE_CHK_RT(rtDeviceReset(0));
return ret, "Davinci model GetInputOutputDescInfo failed");

for (const auto &desc : input_list) {
model_info.input_desc.push_back(desc.shape_info);
}
for (const auto &desc : output_list) {
model_info.output_desc.push_back(desc.shape_info);
}

model_info.name = davinci_model->Name();
} catch (...) {
DOMI_LOGE("OM model parser failed, some exceptions occur !");
GE_CHK_RT(rtDeviceReset(0));
return FAILED;
}

GE_CHK_RT(rtDeviceReset(0));

return SUCCESS;
}

DavinciModelParser::DavinciModelParser() {}

DavinciModelParser::~DavinciModelParser() {}


+2 -12  ge/graph/load/new_model_manager/model_manager.cc

@@ -995,16 +995,6 @@ Status ModelManager::GetModelAttr(uint32_t model_id, std::vector<string> &dynami
return SUCCESS;
}

Status ModelManager::GetInputOutputDescInfoForZeroCopy(const uint32_t model_id, vector<InputOutputDescInfo> &input_desc,
vector<InputOutputDescInfo> &output_desc,
std::vector<uint32_t> &inputFormats,
std::vector<uint32_t> &outputFormats) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetInputOutputDescInfo Failed, Invalid model id %u!", model_id);
return davinci_model->GetInputOutputDescInfoForZeroCopy(input_desc, output_desc, inputFormats, outputFormats);
}

///
/// @ingroup ge
/// @brief Get AIPP info
@@ -1013,11 +1003,11 @@ Status ModelManager::GetInputOutputDescInfoForZeroCopy(const uint32_t model_id,
/// @param [out] aipp_info
/// @return execute result
///
Status ModelManager::GetAIPPInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
Status ModelManager::GetAippInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info) {
std::shared_ptr<DavinciModel> davinci_model = GetModel(model_id);
GE_CHK_BOOL_RET_STATUS(davinci_model != nullptr, ACL_ERROR_GE_EXEC_MODEL_ID_INVALID,
"GetAIPPInfo failed, invalid model_id is %u.", model_id);
return davinci_model->GetAIPPInfo(index, aipp_info);
return davinci_model->GetAippInfo(index, aipp_info);
}

Status ModelManager::GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index) {


+1 -15  ge/graph/load/new_model_manager/model_manager.h

@@ -239,24 +239,10 @@ class FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY ModelManager {
/// @param [out] aipp_info
/// @return execute result
///
ge::Status GetAIPPInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info);
ge::Status GetAippInfo(const uint32_t model_id, uint32_t index, AippConfigInfo &aipp_info);

ge::Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index);

///
/// @ingroup domi_ome
/// @brief set model input and output size zero copy
/// @param [in] model_id model id
/// @param [out] input_shape input tensor
/// @param [out] output_shape output tensor
/// @return SUCCESS success
/// @return PARAM_INVALID parameter invalid
///
ge::Status GetInputOutputDescInfoForZeroCopy(const uint32_t model_id, std::vector<InputOutputDescInfo> &input_desc,
std::vector<InputOutputDescInfo> &output_desc,
std::vector<uint32_t> &inputFormats,
std::vector<uint32_t> &outputFormats);

ge::Status GetCurShape(const uint32_t model_id, std::vector<int64_t> &batch_info, int32_t &dynamic_type);

ge::Status GetModelAttr(uint32_t model_id, std::vector<string> &dynamic_output_shape_info);


+0 -3  inc/framework/executor/ge_executor.h

@@ -157,9 +157,6 @@ class GE_FUNC_DEV_VISIBILITY GE_FUNC_HOST_VISIBILITY GeExecutor {

ge::Status GetAippType(uint32_t model_id, uint32_t index, InputAippType &type, size_t &aipp_index);

ge::Status GetModelDescInfoForZeroCopy(uint32_t model_id, std::vector<ge::TensorDesc> &input_desc,
std::vector<ge::TensorDesc> &output_desc);

ge::Status CommandHandle(const ge::Command &command);

ge::Status SetDump(const DumpConfig &dump_config);


+338 -5  tests/ut/ge/graph/load/davinci_model_unittest.cc

@@ -139,13 +139,14 @@ TEST_F(UtestDavinciModel, init_data_op) {
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

OpDescPtr op_input = CreateOpDesc("data", DATA);
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_input = CreateOpDesc("data", DATA);
op_input->AddInputDesc(tensor);
op_input->AddOutputDesc(tensor);
op_input->SetInputOffset({1024});
op_input->SetOutputOffset({5120});
op_input->SetOutputOffset({1024});
NodePtr node_input = graph->AddNode(op_input);

OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
@@ -168,12 +169,14 @@ TEST_F(UtestDavinciModel, init_data_op_subgraph) {
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

OpDescPtr op_input = CreateOpDesc("data", DATA);
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_input = CreateOpDesc("data", DATA);
op_input->AddInputDesc(tensor);
op_input->AddOutputDesc(tensor);
op_input->SetInputOffset({1024});
op_input->SetOutputOffset({5120});
op_input->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_input);

uint32_t data_op_index = 0;
@@ -192,8 +195,10 @@ TEST_F(UtestDavinciModel, init_netoutput_op_subgraph) {
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_output = CreateOpDesc("output", NETOUTPUT);
op_output->AddInputDesc(tensor);
op_output->SetInputOffset({1024});
op_output->SetSrcName( { "data" } );
@@ -426,4 +431,332 @@ TEST_F(UtestDavinciModel, InitRealSizeAndShapeInfo_succ3) {
EXPECT_EQ(ret, SUCCESS);
}

TEST_F(UtestDavinciModel, init_data_aipp_info) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc);

GeAttrValue::NAMED_ATTRS aipp_attr;
aipp_attr.SetAttr("aipp_mode", GeAttrValue::CreateFrom<GeAttrValue::INT>(domi::AippOpParams::dynamic));
aipp_attr.SetAttr("related_input_rank", GeAttrValue::CreateFrom<GeAttrValue::INT>(0));
aipp_attr.SetAttr("max_src_image_size", GeAttrValue::CreateFrom<GeAttrValue::INT>(2048));
aipp_attr.SetAttr("support_rotation", GeAttrValue::CreateFrom<GeAttrValue::INT>(1));
EXPECT_TRUE(AttrUtils::SetNamedAttrs(op_desc, ATTR_NAME_AIPP, aipp_attr));

AippConfigInfo aipp_info;
EXPECT_EQ(model.GetAippInfo(0, aipp_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAippInfo(0, aipp_info), SUCCESS);
EXPECT_EQ(aipp_info.aipp_mode, domi::AippOpParams::dynamic);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_static) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc);

AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "static_aipp");

InputAippType aipp_type;
size_t aipp_index = 0;
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(aipp_type, DATA_WITH_STATIC_AIPP);
EXPECT_EQ(aipp_index, 0xFFFFFFFFu);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_dynamic) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0
AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp");
AttrUtils::SetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, "releated_aipp");

InputAippType aipp_type;
size_t aipp_index = 0;
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_releated) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

{
OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0
AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp");
AttrUtils::SetStr(op_desc, ATTR_DATA_AIPP_DATA_NAME_MAP, "releated_aipp");
}
{
OpDescPtr op_desc = CreateOpDesc("releated_aipp", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 1
}

InputAippType aipp_type;
size_t aipp_index = 0;
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(aipp_type, DATA_WITH_DYNAMIC_AIPP);
EXPECT_EQ(aipp_index, 1);

EXPECT_EQ(model.input_addrs_list_.size(), 2);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 2);
}

TEST_F(UtestDavinciModel, init_data_aipp_dynamic_conf) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0
AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp_conf");

InputAippType aipp_type;
size_t aipp_index = 0;
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(aipp_type, DYNAMIC_AIPP_NODE);
EXPECT_EQ(aipp_index, 0xFFFFFFFFU);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_dynamic_invalid) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0
AttrUtils::SetStr(op_desc, ATTR_DATA_RELATED_AIPP_MODE, "dynamic_aipp_invalid");

InputAippType aipp_type;
size_t aipp_index = 0;
EXPECT_EQ(model.GetAippType(0, aipp_type, aipp_index), SUCCESS);
EXPECT_EQ(model.InitNodes(graph), ACL_ERROR_GE_AIPP_MODE_INVALID);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_input_info_empty) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0

vector<string> inputs = {};
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
vector<string> outputs = {};
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

OriginInputInfo orig_input_info;
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), SUCCESS);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_input_info_normal) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0

vector<string> inputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

OriginInputInfo orig_input_info;
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), SUCCESS);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_input_info_invalid) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0

vector<string> inputs = { "NCHW:DT_FLOAT:TensorName" }; // Invalid
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

OriginInputInfo orig_input_info;
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);
EXPECT_EQ(model.InitNodes(graph), ACL_ERROR_GE_AIPP_MODE_INVALID);
EXPECT_EQ(model.GetOrigInputInfo(0, orig_input_info), ACL_ERROR_GE_AIPP_NOT_EXIST);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}

TEST_F(UtestDavinciModel, init_data_aipp_input_dims_normal) {
DavinciModel model(0, nullptr);
model.ge_model_ = make_shared<GeModel>(); // for CustAICPUKernelStore::GetCustAICPUKernelStore()
model.runtime_param_.mem_base = (uint8_t *)0x08000000;
model.runtime_param_.mem_size = 5120000;
ComputeGraphPtr graph = make_shared<ComputeGraph>("default");

GeTensorDesc tensor(GeShape(), FORMAT_NCHW, DT_FLOAT);
TensorUtils::SetSize(tensor, 512);

OpDescPtr op_desc = CreateOpDesc("data", DATA);
op_desc->AddInputDesc(tensor);
op_desc->AddOutputDesc(tensor);
op_desc->SetInputOffset({1024});
op_desc->SetOutputOffset({1024});
NodePtr node = graph->AddNode(op_desc); // op_index 0

vector<string> inputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_INPUTS, inputs);
vector<string> outputs = { "NCHW:DT_FLOAT:TensorName:TensorSize:3:1,2,8" };
AttrUtils::SetListStr(op_desc, ATTR_NAME_AIPP_OUTPUTS, outputs);

vector<InputOutputDims> input_dims;
vector<InputOutputDims> output_dims;
EXPECT_EQ(model.GetAllAippInputOutputDims(0, input_dims, output_dims), ACL_ERROR_GE_AIPP_NOT_EXIST);
EXPECT_EQ(model.InitNodes(graph), SUCCESS);
EXPECT_EQ(model.GetAllAippInputOutputDims(0, input_dims, output_dims), SUCCESS);
EXPECT_EQ(input_dims.size(), 1);
EXPECT_EQ(output_dims.size(), 1);

EXPECT_EQ(model.input_addrs_list_.size(), 1);
EXPECT_EQ(model.output_addrs_list_.size(), 0);
EXPECT_EQ(model.op_list_.size(), 1);
}
} // namespace ge

+0 -1  tests/ut/ge/graph/load/kernel_task_info_unittest.cc

@@ -1120,7 +1120,6 @@ TEST_F(UtestKernelTaskInfo, kernel_task_info_init_success) {
op_desc->AddOutputDesc(descout);
op_desc->SetId(0);

model.data_op_list_.push_back(op_desc);
model.op_list_[0] = op_desc;

domi::TaskDef task_def;

