
!156 aipp config: add related_input_name for input and modify proto

Merge pull request !156 from zhengyuanhua/development
tags/v1.1.0
王涛 Gitee 4 years ago
commit ff99761582
14 changed files with 153 additions and 14 deletions
  1. ge/client/proto/insert_op.proto (+3, -0)
  2. ge/common/proto/insert_op.proto (+3, -0)
  3. ge/executor/proto/insert_op.proto (+3, -0)
  4. ge/graph/preprocess/insert_op/ge_aipp_op.cc (+37, -0)
  5. ge/graph/preprocess/insert_op/ge_aipp_op.h (+1, -0)
  6. ge/graph/preprocess/insert_op/util_insert_aipp_op.cc (+82, -9)
  7. ge/graph/preprocess/insert_op/util_insert_aipp_op.h (+4, -0)
  8. ge/offline/proto/insert_op.proto (+3, -0)
  9. ge/proto/insert_op.proto (+3, -0)
  10. ge/session/omg.cc (+2, -0)
  11. inc/framework/omg/omg_inner_types.h (+2, -0)
  12. inc/framework/omg/parser/parser_inner_ctx.h (+8, -3)
  13. metadef (+1, -1)
  14. parser (+1, -1)

ge/client/proto/insert_op.proto (+3, -0)

@@ -45,6 +45,9 @@ message AippOpParams {
// Identifies which input of the model AIPP processing is applied to. For example, if the model has two inputs and AIPP is needed on the 2nd input, set related_input_rank to 1.
uint32 related_input_rank = 2;

// related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
string related_input_name = 6;

// The input_edge_idx parameter is optional; its type is integer and its valid range is >= 0.
// It is used to apply different AIPP processing to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.
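For illustration, here is a minimal sketch of setting the new field through the generated C++ proto API (standard protobuf setters; the include path and the top name "data" are assumptions, not part of this change):

#include "proto/insert_op.pb.h"  // assumed path of the header generated from insert_op.proto

void ExampleFillAippParams() {
  domi::AippOpParams params;
  // New in this change: select the model input by its Caffe top name ...
  params.set_related_input_name("data");  // "data" is a hypothetical top name
  // ... as an alternative to the existing index-based field:
  // params.set_related_input_rank(0);
}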


ge/common/proto/insert_op.proto (+3, -0)

@@ -45,6 +45,9 @@ message AippOpParams {
// Identifies which input of the model AIPP processing is applied to. For example, if the model has two inputs and AIPP is needed on the 2nd input, set related_input_rank to 1.
uint32 related_input_rank = 2;

// related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
string related_input_name = 6;

// The input_edge_idx parameter is optional; its type is integer and its valid range is >= 0.
// It is used to apply different AIPP processing to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.


ge/executor/proto/insert_op.proto (+3, -0)

@@ -45,6 +45,9 @@ message AippOpParams {
// Identifies which input of the model AIPP processing is applied to. For example, if the model has two inputs and AIPP is needed on the 2nd input, set related_input_rank to 1.
uint32 related_input_rank = 2;

// related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
string related_input_name = 6;

// The input_edge_idx parameter is optional; its type is integer and its valid range is >= 0.
// It is used to apply different AIPP processing to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.


ge/graph/preprocess/insert_op/ge_aipp_op.cc (+37, -0)

@@ -183,6 +183,11 @@ Status AippOp::InsertAippToGraph(ComputeGraphPtr &graph, std::string &aippConfig
GE_CHECK_NOTNULL(graph);
NodePtr target_input = nullptr;
std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> target_edges;

if (this->ConvertRelatedInputNameToRank() != SUCCESS) {
GELOGE(FAILED, "AippOp: convert related input name to rank failed.");
return FAILED;
}
GE_CHK_STATUS_RET(this->GetTargetPosition(graph, target_input, target_edges), "Get data nodes position failed");

std::map<OutDataAnchorPtr, NodePtr> out_anchors_to_aipp;
@@ -410,6 +415,38 @@ Status AippOp::GetStaticTargetNode(const ComputeGraphPtr &graph, NodePtr &data_n

return SUCCESS;
}
Status AippOp::ConvertRelatedInputNameToRank() {
GE_CHECK_NOTNULL(aipp_params_);

string related_input_name = aipp_params_->related_input_name();
if (related_input_name.empty()) {
return SUCCESS;
}

std::vector<std::string> data_top_names = domi::GetContext().data_top_names;
GELOGI("Convert name to rank start: data size[%zu]", data_top_names.size());
uint32_t index = 0;
bool convert_flag = false;
for (const auto &data_top_name : data_top_names) {
if (related_input_name == data_top_name) {
aipp_params_->set_related_input_rank(index);
convert_flag = true;
GELOGI("AippOp: rank: %u, top name: %s.", index, data_top_name.c_str());
break;
}
index++;
}
if (!convert_flag) {
string error_msg = "Top name " + related_input_name + "convert rank failed, Please"
" ensure top name in aipp config is the top name of data node.";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
GELOGE(PARAM_INVALID, "Top name[%s] converts rank failed.", related_input_name.c_str());
return PARAM_INVALID;
}

return SUCCESS;
}


Status AippOp::GetTargetPosition(ComputeGraphPtr graph, NodePtr &target_input,
std::vector<std::pair<OutDataAnchorPtr, InDataAnchorPtr>> &target_edges) {
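To illustrate the lookup that ConvertRelatedInputNameToRank performs on domi::GetContext().data_top_names, here is a small self-contained sketch (a hypothetical helper, not part of the GE API):

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical standalone equivalent of the name-to-rank lookup above:
// returns the index of `name` in `data_top_names`, or -1 if the name is not a data top name.
static int64_t TopNameToRank(const std::vector<std::string> &data_top_names, const std::string &name) {
  for (size_t i = 0; i < data_top_names.size(); ++i) {
    if (data_top_names[i] == name) {
      return static_cast<int64_t>(i);
    }
  }
  return -1;  // the real code reports E10043 / PARAM_INVALID in this case
}

// e.g. TopNameToRank({"data", "im_info"}, "im_info") returns 1,
// which is what the real code writes back via set_related_input_rank(1).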


ge/graph/preprocess/insert_op/ge_aipp_op.h (+1, -0)

@@ -82,6 +82,7 @@ class AippOp : public InsertOpBase {
Status AddNodeToGraph(const NodePtr &aipp_node, int64_t max_dynamic_aipp_size);
Status AddAippAttrbutes(const OpDescPtr &op_desc, const std::string &aipp_cfg_path, const uint32_t &index);
Status AddAttrToAippData(const OpDescPtr &aipp_data_op_desc);
Status ConvertRelatedInputNameToRank();

domi::AippOpParams *aipp_params_ = nullptr;
ge::NodePtr aipp_node_ = nullptr;


ge/graph/preprocess/insert_op/util_insert_aipp_op.cc (+82, -9)

@@ -34,6 +34,7 @@
#include "graph/utils/op_desc_utils.h"
#include "graph/utils/tensor_utils.h"
#include "graph/utils/type_utils.h"
#include "util_insert_aipp_op.h"

using domi::AippOpParams;

@@ -115,22 +116,94 @@ void InsertNewOpUtil::ClearNewOps() {
}
}

Status InsertNewOpUtil::CheckPositionNotRepeat() {
Status InsertNewOpUtil::CheckInputNamePositionNotRepeat() {
for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
GE_CHECK_NOTNULL(item);

for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
GE_CHECK_NOTNULL(another_item);
if (another_item->related_input_name().empty()) {
string error_msg = "Can not set both related_input_name and related_input_rank!"
" Please ensure the param style is the same as in the first aipp config (related_input_name).";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
GELOGE(PARAM_INVALID,
"Can not set both related_input_name and related_input_rank!"
" Please ensure the param style is the same as in the first aipp config (related_input_name).");
return PARAM_INVALID;
}
if (item->related_input_name() == another_item->related_input_name()) {
string error_msg = "Can not insert aipp to the same position! Please ensure the related_input_name"
" param is different in each aipp config.";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
GELOGE(PARAM_INVALID,
"Can not insert aipp op to the same position! Please ensure the related_input_name param "
"is different in each aipp config.");
return PARAM_INVALID;
}
}
}

return SUCCESS;
}

Status InsertNewOpUtil::CheckInputRankPositionNoRepeat() {
for (int i = 0; i < insert_op_conf_->aipp_op_size(); i++) {
const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(i);
GE_CHECK_NOTNULL(item);

for (int j = i + 1; j < insert_op_conf_->aipp_op_size(); j++) {
const domi::AippOpParams *another_item = insert_op_conf_->mutable_aipp_op(j);
GE_IF_BOOL_EXEC(item->related_input_rank() == another_item->related_input_rank(),
string errormsg = "Can not insert aipp to the same position! Please ensure related_input_rank"
" param is different in each aipp config.";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {errormsg});
GELOGE(PARAM_INVALID,
"Can not insert aipp op to the same position! Please ensure related_input_rank param "
"is different in each aipp config.");
return PARAM_INVALID;);
GE_CHECK_NOTNULL(another_item);
if (!another_item->related_input_name().empty()) {
string error_msg = "Can not set both related_input_rank and related_input_name!"
" Please ensure the param style is the same as in the first aipp config (related_input_rank).";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
GELOGE(PARAM_INVALID,
"Can not set both related_input_rank and related_input_name!"
" Please ensure the param style is the same as in the first aipp config (related_input_rank).");
return PARAM_INVALID;
}
if (item->related_input_rank() == another_item->related_input_rank()) {
string error_msg = "Can not insert aipp to the same position! Please ensure the related_input_rank"
" param is different in each aipp config.";
ErrorManager::GetInstance().ATCReportErrMessage("E10043", {"reason"}, {error_msg});
GELOGE(PARAM_INVALID,
"Can not insert aipp op to the same position! Please ensure the related_input_rank param "
"is different in each aipp config.");
return PARAM_INVALID;
}
}
}

return SUCCESS;
}

Status InsertNewOpUtil::CheckPositionNotRepeat() {
GE_CHECK_NOTNULL(insert_op_conf_);

if (insert_op_conf_->aipp_op_size() <= 1) {
GELOGI("Aipp op size[%d] less than 2, no need to check position repeat.", insert_op_conf_->aipp_op_size());
return SUCCESS;
}

const domi::AippOpParams *item = insert_op_conf_->mutable_aipp_op(0);
GE_CHECK_NOTNULL(item);

string related_input_name = item->related_input_name();
Status ret = FAILED;
if (related_input_name.empty()) {
ret = CheckInputRankPositionNoRepeat();
} else {
ret = CheckInputNamePositionNotRepeat();
}
if (ret != SUCCESS) {
GELOGE(FAILED, "Check for repeated aipp insert position failed.");
return FAILED;
}

return SUCCESS;
}
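As a usage note (hypothetical values; the generated proto setters are assumed, as in the earlier sketch): the first aipp config decides whether insert positions are identified by top name or by rank, and the checks above reject configs that mix the two styles or that target the same input twice.

// Sketch of a configuration the checks above reject (hypothetical top name "data").
void ExampleRejectedMixedConfig() {
  domi::AippOpParams first, second;
  first.set_related_input_name("data");  // the first config selects its input by top name ...
  second.set_related_input_rank(1);      // ... so a later rank-based config fails with E10043
  // Two configs with the same related_input_name (or, in the rank-based case,
  // the same related_input_rank) are rejected as well.
}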



ge/graph/preprocess/insert_op/util_insert_aipp_op.h (+4, -0)

@@ -51,6 +51,10 @@ class InsertNewOpUtil {

Status GetAippParams(const std::unique_ptr<domi::AippOpParams> &aippParams, const ge::NodePtr &aipp_node);

Status CheckInputNamePositionNotRepeat();

Status CheckInputRankPositionNoRepeat();

Status CheckGraph(const ge::ComputeGraphPtr &graph);

InsertNewOpUtil() = default;


ge/offline/proto/insert_op.proto (+3, -0)

@@ -45,6 +45,9 @@ message AippOpParams {
// Identifies which input of the model AIPP processing is applied to. For example, if the model has two inputs and AIPP is needed on the 2nd input, set related_input_rank to 1.
uint32 related_input_rank = 2;

// related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
string related_input_name = 6;

// The input_edge_idx parameter is optional; its type is integer and its valid range is >= 0.
// It is used to apply different AIPP processing to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.


ge/proto/insert_op.proto (+3, -0)

@@ -45,6 +45,9 @@ message AippOpParams {
// Identifies which input of the model AIPP processing is applied to. For example, if the model has two inputs and AIPP is needed on the 2nd input, set related_input_rank to 1.
uint32 related_input_rank = 2;

// related_input_name is optional; it is the top name of the Data node into which AIPP is inserted.
string related_input_name = 6;

// The input_edge_idx parameter is optional; its type is integer and its valid range is >= 0.
// It is used to apply different AIPP processing to different outputs of the Data operator. If it is not configured, AIPP is applied by default to all output edges of the model input specified by related_input_rank.
// The configured value must be <= the number of output edges of the Data operator.


ge/session/omg.cc (+2, -0)

@@ -1038,6 +1038,7 @@ void UpdateOmgCtxWithParserCtx() {
domi::GetContext().out_top_names = GetParserContext().out_top_names;
domi::GetContext().user_out_nodes_top_vec = GetParserContext().user_out_nodes_top_vec;
domi::GetContext().default_out_nodes = GetParserContext().default_out_nodes;
domi::GetContext().data_top_names = GetParserContext().data_top_names;
}

void UpdateParserCtxWithOmgCtx() {
@@ -1054,5 +1055,6 @@ void UpdateParserCtxWithOmgCtx() {
GetParserContext().input_nodes_format_map = domi::GetContext().input_nodes_format_map;
GetParserContext().out_top_names = domi::GetContext().out_top_names;
GetParserContext().user_out_nodes_top_vec = domi::GetContext().user_out_nodes_top_vec;
GetParserContext().data_top_names = domi::GetContext().data_top_names;
}
} // namespace ge

inc/framework/omg/omg_inner_types.h (+2, -0)

@@ -100,6 +100,8 @@ struct OmgContext {
std::vector<std::string> net_out_nodes;
// net out nodes top names(only caffe has top)
std::vector<std::string> out_top_names;
// net data nodes top names(only caffe has top)
std::vector<std::string> data_top_names;
// preferential format used by the entire network
domiTensorFormat_t net_format = DOMI_TENSOR_RESERVED;
domi::FrameworkType type = domi::FRAMEWORK_RESERVED;


inc/framework/omg/parser/parser_inner_ctx.h (+8, -3)

@@ -49,6 +49,8 @@ struct ParserContext {
std::vector<std::string> user_out_nodes_top_vec;
// net out nodes (where user_out_nodes or leaf nodes)
std::vector<std::string> net_out_nodes;
// net data nodes top names(only caffe has top)
std::vector<std::string> data_top_names;
// net out nodes top names(only caffe has top)
std::vector<std::string> out_top_names;
// Whether to use dynamic batch size or dynamic image size
@@ -57,9 +59,12 @@ struct ParserContext {
domi::domiTensorFormat_t format = domi::DOMI_TENSOR_ND;
domi::FrameworkType type = domi::FRAMEWORK_RESERVED;
RunMode run_mode = ONLY_PRE_CHECK;
std::string custom_proto_path; // save caffe custom proto path, used by caffe parse
std::string caffe_proto_path; // save caffe proto path, used by caffe parse
std::string enable_scope_fusion_passes; // name of the pass that needs to take effect
// save caffe custom proto path, used by caffe parse
std::string custom_proto_path;
// save caffe proto path, used by caffe parse
std::string caffe_proto_path;
// name of the pass that needs to take effect
std::string enable_scope_fusion_passes;
};

ParserContext &GetParserContext();


metadef (+1, -1)

@@ -1 +1 @@
Subproject commit 1cc55bcae09902b3d158993dd57bfbd1d3337066
Subproject commit 5d06bc7547189f24195b3cedcb0bfc3d787c80a5

parser (+1, -1)

@@ -1 +1 @@
Subproject commit db4e6070bb2cec01cead264a44ceae07e7f3048e
Subproject commit 5af5c72fba1315f3d52113a5e88dc618d68e7dbc
