
misra

pull/460/head
lzl committed 3 years ago
commit b8c05f7323
31 changed files with 126 additions and 124 deletions
  1. parser/caffe/caffe_data_parser.cc (+1, -1)
  2. parser/caffe/caffe_data_parser.h (+1, -1)
  3. parser/caffe/caffe_parser.cc (+17, -17)
  4. parser/caffe/caffe_parser.h (+22, -21)
  5. parser/common/acl_graph_parser_util.cc (+6, -6)
  6. parser/common/acl_graph_parser_util.h (+6, -6)
  7. parser/common/model_saver.cc (+1, -1)
  8. parser/common/parser_fp16_t.cc (+7, -7)
  9. parser/common/parser_types.cc (+1, -1)
  10. parser/common/proto_file_parser.cc (+2, -2)
  11. parser/common/proto_file_parser.h (+1, -1)
  12. parser/common/prototype_pass_manager.cc (+1, -1)
  13. parser/common/prototype_pass_manager.h (+1, -1)
  14. parser/onnx/onnx_custom_parser_adapter.cc (+1, -1)
  15. parser/onnx/onnx_custom_parser_adapter.h (+1, -1)
  16. parser/onnx/onnx_parser.cc (+9, -9)
  17. parser/onnx/onnx_parser.h (+9, -8)
  18. parser/onnx/subgraph_adapter/if_subgraph_adapter.cc (+3, -3)
  19. parser/onnx/subgraph_adapter/if_subgraph_adapter.h (+3, -3)
  20. parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc (+1, -1)
  21. parser/tensorflow/graph_functiondef.cc (+1, -1)
  22. parser/tensorflow/graph_optimizer.cc (+3, -3)
  23. parser/tensorflow/tensorflow_arg_parser.cc (+2, -2)
  24. parser/tensorflow/tensorflow_frameworkop_parser.cc (+4, -4)
  25. parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc (+1, -1)
  26. parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h (+1, -1)
  27. parser/tensorflow/tensorflow_fusion_op_parser.cc (+1, -1)
  28. parser/tensorflow/tensorflow_fusion_op_parser.h (+1, -1)
  29. parser/tensorflow/tensorflow_parser.cc (+14, -14)
  30. parser/tensorflow/tensorflow_parser.h (+2, -2)
  31. parser/tensorflow/tensorflow_reshape_parser.cc (+2, -2)

parser/caffe/caffe_data_parser.cc (+1, -1)

@@ -29,7 +29,7 @@ using domi::CAFFE;

namespace ge {
Status CaffeDataParser::GetOutputDesc(const string &name, const std::vector<int64_t> &input_dims,
const ge::OpDescPtr &op) {
const ge::OpDescPtr &op) const {
GE_CHECK_NOTNULL(op);
GELOGI("The input dim size is %zu in layer %s.", input_dims.size(), name.c_str());



parser/caffe/caffe_data_parser.h (+1, -1)

@@ -45,7 +45,7 @@ class PARSER_FUNC_VISIBILITY CaffeDataParser : public CaffeOpParser, public Data
* @return SUCCESS parse successfully
* @return FAILED parse failed
*/
Status GetOutputDesc(const std::string &name, const std::vector<int64_t> &input_dims, const ge::OpDescPtr &op);
Status GetOutputDesc(const std::string &name, const std::vector<int64_t> &input_dims, const ge::OpDescPtr &op) const;

// caffe data layer type could be type of `Input` or `DummyData`
Status ParseParamsForInput(const domi::caffe::LayerParameter *layer, ge::OpDescPtr &op);
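
Most hunks in this commit follow the pattern shown in the two files above: a member function that does not modify object state gains a trailing const, added to both the declaration in the header and the definition in the .cc file. A minimal sketch of that pattern, using hypothetical names rather than this repository's classes:

#include <set>
#include <string>

// Hypothetical parser class illustrating the const-qualification applied throughout this commit.
class ExampleParser {
 public:
  // Declared const because the method only reads member data.
  bool HasOutput(const std::string &name) const;

 private:
  std::set<std::string> outputs_;
};

// The definition must repeat the const qualifier so it matches the declaration.
bool ExampleParser::HasOutput(const std::string &name) const {
  return outputs_.count(name) > 0U;  // read-only access is allowed inside a const member
}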


parser/caffe/caffe_parser.cc (+17, -17)

@@ -512,7 +512,7 @@ Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google:
return SUCCESS;
}

Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) {
Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) const {
GE_CHECK_NOTNULL(model_path);
GE_CHECK_NOTNULL(message);
GELOGI("Start to read model file: %s.", model_path);
@@ -586,7 +586,7 @@ Status CaffeModelParser::ParseLayerParameter(const google::protobuf::Descriptor
}

Status CaffeModelParser::CreateCustomOperator(string op_name, string op_type, const google::protobuf::Message *message,
int index, vector<ge::Operator> &operators) {
int index, vector<ge::Operator> &operators) const {
if (op_name.empty() || op_type.empty()) {
REPORT_INNER_ERROR("E19999", "[Check][Param]Name or type of layer is empty, name: %s, type: %s.",
op_name.c_str(), op_type.c_str());
@@ -616,7 +616,7 @@ Status CaffeModelParser::CreateCustomOperator(string op_name, string op_type, co
return SUCCESS;
}

void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_index) {
void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_index) const {
auto iter_node_name = ge::GetParserContext().out_nodes_map.find(layer_name);
if (iter_node_name != ge::GetParserContext().out_nodes_map.end()) {
iter_node_name->second.emplace_back(top_index);
@@ -705,7 +705,7 @@ Status CaffeModelParser::AddBlobsToMap(const domi::caffe::LayerParameter &layer,
return SUCCESS;
}

bool CaffeModelParser::IsOpAttrEmpty(const ge::Operator &op, const std::string &type) {
bool CaffeModelParser::IsOpAttrEmpty(const ge::Operator &op, const std::string &type) const {
std::map<AscendString, AscendString> attrs;
(void)op.GetAllAttrNamesAndTypes(attrs);

@@ -899,7 +899,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C
return SUCCESS;
}

Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) {
Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const {
GE_CHECK_NOTNULL(op_desc);
// Data node input and output tensordesc added in parserparam
if (op_desc->GetType() == ge::parser::DATA) {
@@ -1069,7 +1069,7 @@ Status CaffeModelParser::AddEdges(ge::ComputeGraphPtr &graph) {
return SUCCESS;
}

bool CaffeModelParser::IsOutputTop(const string &op_name, const int32_t index) {
bool CaffeModelParser::IsOutputTop(const string &op_name, const int32_t index) const {
bool ret = false;
auto iter = ge::GetParserContext().out_nodes_map.find(op_name);
if (iter != ge::GetParserContext().out_nodes_map.end()) {
@@ -1169,7 +1169,7 @@ Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_mes
return SUCCESS;
}

bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer) {
bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer) const {
if (layer.include_size() != 0) {
bool filter_flag = false;
for (int32_t j = 0; j < layer.include_size(); j++) {
@@ -1189,7 +1189,7 @@ bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer)
return true;
}

bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) {
bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) const {
for (auto &bottom_name : layer.bottom()) {
if (top_name == bottom_name) {
return true;
@@ -1199,7 +1199,7 @@ bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer
}

std::string CaffeModelParser::RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name,
int index) {
int index) const {
return (top_name + "_" + layer.name() + "_" + std::to_string(index));
}

@@ -1294,7 +1294,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
"[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
layer.name().c_str(), layer.type().c_str());

CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && (input_data_flag == true)), has_error = true;
CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && input_data_flag), has_error = true;
REPORT_INNER_ERROR("E19999", "net %s has input and data layer simultaneously, check invalid."
"layer name:%s, layer type:%s", proto_message.name().c_str(),
layer.name().c_str(), layer.type().c_str());
@@ -1516,7 +1516,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
"[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
layer.name().c_str(), layer.type().c_str());

CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && (input_data_flag == true)), has_error = true;
CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && input_data_flag), has_error = true;
GELOGE(FAILED, "[Check][Layer]net %s has input and data layer simultaneously, check invalid."
"layer name:%s, layer type:%s", proto_message.name().c_str(),
layer.name().c_str(), layer.type().c_str()));
@@ -1591,7 +1591,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
return SUCCESS;
}

Status CaffeModelParser::FindShareParamLayers(const std::map<std::string, std::vector<std::string>> &layer_params_map) {
Status CaffeModelParser::FindShareParamLayers(const std::map<std::string, std::vector<std::string>> &layer_params_map) const {
for (auto p_iter = layer_params_map.begin(); p_iter != layer_params_map.end(); ++p_iter) {
for (auto p2_iter = p_iter; p2_iter != layer_params_map.end(); ++p2_iter) {
if (p_iter->first != p2_iter->first && p_iter->second == p2_iter->second) {
@@ -1625,7 +1625,7 @@ Status CaffeModelParser::ToJson(const char *model_file, const char *json_file) {
return ModelSaver::SaveJsonToFile(json_file, j);
}

Status CaffeModelParser::ReorderInput(domi::caffe::NetParameter &net) {
Status CaffeModelParser::ReorderInput(domi::caffe::NetParameter &net) const {
int layer_size = net.layer_size();
for (int i = 0; i < layer_size; ++i) {
domi::caffe::LayerParameter *layer = net.mutable_layer(i);
@@ -2018,7 +2018,7 @@ Status CaffeWeightsParser::ConvertBlobsProto(const google::protobuf::Message *me
}

Status CaffeWeightsParser::ConvertBlobShapeProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message) {
google::protobuf::Message *dest_message) const {
const google::protobuf::Reflection *reflection = message->GetReflection();
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message");
vector<const google::protobuf::FieldDescriptor *> field_desc;
@@ -2040,7 +2040,7 @@ Status CaffeWeightsParser::ConvertBlobShapeProto(const google::protobuf::Message
}

Status CaffeWeightsParser::ConvertConvParamProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message) {
google::protobuf::Message *dest_message) const {
const google::protobuf::Reflection *reflection = message->GetReflection();
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message");
vector<const google::protobuf::FieldDescriptor *> field_desc;
@@ -2060,7 +2060,7 @@ Status CaffeWeightsParser::ConvertConvParamProto(const google::protobuf::Message
}

Status CaffeWeightsParser::ConvertInnerProdcutProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message) {
google::protobuf::Message *dest_message) const {
const google::protobuf::Reflection *reflection = message->GetReflection();
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message");
vector<const google::protobuf::FieldDescriptor *> field_desc;
@@ -2079,7 +2079,7 @@ Status CaffeWeightsParser::ConvertInnerProdcutProto(const google::protobuf::Mess
return SUCCESS;
}

Status CaffeWeightsParser::CheckLayersSize(const google::protobuf::Message *message) {
Status CaffeWeightsParser::CheckLayersSize(const google::protobuf::Message *message) const {
const google::protobuf::Reflection *reflection = message->GetReflection();
CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message");
vector<const google::protobuf::FieldDescriptor *> field_desc;


parser/caffe/caffe_parser.h (+22, -21)

@@ -56,17 +56,17 @@ static std::map<std::vector<std::string>, std::vector<std::string>> params_share
class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
public:
CaffeModelParser() {}
virtual ~CaffeModelParser() {}
virtual ~CaffeModelParser() override {}

/**
* @ingroup domi_omg
* @brief Parse the relevant data from the model file and save it to graph
* @param [in] file Path of model file
* @param [in] model_path Path of model file
* @param [in|out] graph graph for saving model information
* @return SUCCESS parse successfully
* @return FAILED parse failed
*/
Status Parse(const char *file, ge::Graph &graph) override;
Status Parse(const char *model_path, ge::Graph &graph) override;

/**
* @ingroup domi_omg
@@ -124,7 +124,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
}

private:
Status Parse(const char *file, ge::ComputeGraphPtr &graph);
Status Parse(const char *model_path, ge::ComputeGraphPtr &graph);

/**
* @ingroup domi_omg
@@ -190,7 +190,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return SUCCESS read file successfully
* @return FAILED read file failed
*/
Status ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message);
Status ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) const;

/*
* @ingroup domi_omg
@@ -216,7 +216,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return FAILED create operator failed
*/
Status CreateCustomOperator(std::string op_name, std::string op_type, const google::protobuf::Message *message,
int index, std::vector<ge::Operator> &operators);
int index, std::vector<ge::Operator> &operators) const;
/**
* @ingroup domi_omg
* @brief Add blob information to the bottom_blobs_map and top_blobs_map_
@@ -259,7 +259,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return true valid
* @return false invalid
*/
bool CheckValidLayer(const domi::caffe::LayerParameter &layer);
bool CheckValidLayer(const domi::caffe::LayerParameter &layer) const;

/**
* @ingroup domi_omg
@@ -267,7 +267,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return true is 'Inplace'
* @return false not is 'Inplace'
*/
bool IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name);
bool IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) const;

/**
* @ingroup domi_omg
@@ -275,7 +275,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @return true yes
* @return false no
*/
bool IsOutputTop(const string &op_name, int32_t index);
bool IsOutputTop(const string &op_name, const int32_t index) const;

/**
* @ingroup domi_omg
@@ -284,29 +284,30 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser {
* @param [in|out] Layer set of the same param
* @return Status
*/
Status FindShareParamLayers(const std::map<std::string, std::vector<std::string>> &);
Status FindShareParamLayers(const std::map<std::string, std::vector<std::string>> &layer_params_map) const;

Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer);
Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const;

Status AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer,
const string &op_type);

Status AddUserOutNodesTop();

std::string RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name, int index);
std::string RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name,
int index) const;

Status GetCustomOp(const domi::caffe::LayerParameter &layer, vector<ge::Operator> &operators);

bool IsOpAttrEmpty(const ge::Operator &op, const std::string &type);
bool IsOpAttrEmpty(const ge::Operator &op, const std::string &type) const;

Status ParseOpParam(const domi::caffe::LayerParameter &layer, ge::OpDescPtr &op,
std::shared_ptr<ge::OpParser> &op_parser);

void SaveOrigionLayerTops(domi::caffe::LayerParameter &layer);

Status ReorderInput(domi::caffe::NetParameter &net);
Status ReorderInput(domi::caffe::NetParameter &net) const;

void AddOutputInfoToContext(string layer_name, int32_t top_index);
void AddOutputInfoToContext(string layer_name, int32_t top_index) const;

Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message);

@@ -357,8 +358,8 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser {

Status Parse(const char *file, ge::ComputeGraphPtr &graph);

Status ParseWeightByFusionProto(const char *model_path, const string &custom_proto_path,
const string &custom_proto_name, ge::ComputeGraphPtr &graph);
Status ParseWeightByFusionProto(const char *weight_path, const string &fusion_proto_path,
const string &fusion_proto_name, ge::ComputeGraphPtr &graph);

Status ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor,
const google::protobuf::Message *message,
@@ -367,7 +368,7 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser {
Status ConvertLayerParameter(const google::protobuf::Message *layer_message,
ge::ComputeGraphPtr &graph);

Status CheckLayersSize(const google::protobuf::Message *message);
Status CheckLayersSize(const google::protobuf::Message *message) const;

Status ConvertLayerProto(const google::protobuf::Message *message,
google::protobuf::Message *layer);
@@ -381,13 +382,13 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser {
google::protobuf::Message *blobs);

Status ConvertBlobShapeProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message);
google::protobuf::Message *dest_message) const;

Status ConvertInnerProdcutProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message);
google::protobuf::Message *dest_message) const;

Status ConvertConvParamProto(const google::protobuf::Message *message,
google::protobuf::Message *dest_message);
google::protobuf::Message *dest_message) const;
/**
* @ingroup domi_omg
* @brief Layer types to be ignored in weight resolution


parser/common/acl_graph_parser_util.cc (+6, -6)

@@ -341,7 +341,7 @@ domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) {
return SUCCESS;
}

domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_output_fp16) {
domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_output_fp16) const {
if (is_output_fp16.empty()) {
return SUCCESS;
}
@@ -365,7 +365,7 @@ domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_ou
return SUCCESS;
}

domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fusion_passes) {
domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fusion_passes) const {
ge::GetParserContext().enable_scope_fusion_passes.clear();
if (enable_scope_fusion_passes.empty()) {
return SUCCESS;
@@ -387,7 +387,7 @@ void AclGrphParseUtil::AddAttrsForInputNodes(const vector<string> &adjust_fp16_f
}

domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
const string &is_input_adjust_hw_layout) {
const string &is_input_adjust_hw_layout) const {
GE_CHECK_NOTNULL(graph);
vector<string> adjust_fp16_format_vec;
if (!is_input_adjust_hw_layout.empty()) {
@@ -430,7 +430,7 @@ domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &gra
}

void AclGrphParseUtil::CreateOutputNodesInfo(std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info,
std::vector<std::string> &output_nodes_name) {
std::vector<std::string> &output_nodes_name) const {
output_nodes_name.clear();
auto &out_tensor_names = ge::GetParserContext().out_tensor_names;
if (out_tensor_names.empty()) {
@@ -462,7 +462,7 @@ void AclGrphParseUtil::CreateOutputNodesInfo(std::vector<std::pair<ge::NodePtr,
}

domi::Status AclGrphParseUtil::GetOutputLeaf(NodePtr node,
std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) {
std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) const {
ge::OpDescPtr tmpDescPtr = node->GetOpDesc();
if (tmpDescPtr == nullptr) {
REPORT_INNER_ERROR("E19999", "param node has no opdesc.");
@@ -576,7 +576,7 @@ domi::Status AclGrphParseUtil::SetOutputNodeInfo(ge::Graph &graph,
return domi::SUCCESS;
}

domi::Status AclGrphParseUtil::CheckOptions(const std::map<AscendString, AscendString> &parser_params) {
domi::Status AclGrphParseUtil::CheckOptions(const std::map<AscendString, AscendString> &parser_params) const {
for (auto &ele : parser_params) {
const char *key_ascend = ele.first.GetString();
if (key_ascend == nullptr) {


parser/common/acl_graph_parser_util.h (+6, -6)

@@ -48,18 +48,18 @@ class AclGrphParseUtil {

private:
bool parser_initialized = false;
domi::Status CheckOptions(const std::map<AscendString, AscendString> &parser_params);
domi::Status GetOutputLeaf(NodePtr node, std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info);
domi::Status CheckOptions(const std::map<AscendString, AscendString> &parser_params) const;
domi::Status GetOutputLeaf(NodePtr node, std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) const;
void CreateOutputNodesInfo(std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info,
std::vector<std::string> &output_nodes_name);
std::vector<std::string> &output_nodes_name) const;
static void SetDefaultFormat();
domi::Status ParseAclOutputNodes(const std::string &out_nodes);
domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16);
domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes);
domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16) const;
domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes) const;
static void AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec, const string &fp16_nodes_name,
size_t index, OpDescPtr &op_desc);
domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
const string &is_input_adjust_hw_layout);
const string &is_input_adjust_hw_layout) const;
domi::Status GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info);
};


parser/common/model_saver.cc (+1, -1)

@@ -36,7 +36,7 @@ const uint32_t kInteval = 2;
FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFile(const char *file_path,
const Json &model) {
Status ret = SUCCESS;
if (file_path == nullptr || SUCCESS != CheckPath(file_path)) {
if ((file_path == nullptr) || (CheckPath(file_path) != SUCCESS)) {
REPORT_INNER_ERROR("E19999", "param file_path is nullptr or checkpath not return success");
GELOGE(FAILED, "[Check][Param]Check output file failed.");
return FAILED;
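
The model_saver.cc hunk above shows another recurring cleanup in this commit: each operand of a logical operator gets its own parentheses, and comparisons put the expression on the left and the constant on the right. A small sketch of the same convention, with hypothetical stand-ins for Status and CheckPath:

#include <cstdint>
#include <cstring>

enum Status : int32_t { SUCCESS = 0, FAILED = 1 };  // stand-in for the real Status type

// Hypothetical stand-in for the real CheckPath in model_saver.cc.
static Status CheckPath(const char *path) {
  return (std::strlen(path) > 0U) ? SUCCESS : FAILED;
}

static bool IsOutputFileUsable(const char *file_path) {
  // Before: if (file_path == nullptr || SUCCESS != CheckPath(file_path))
  // After:  each operand of || is parenthesized and the constant sits on the right.
  if ((file_path == nullptr) || (CheckPath(file_path) != SUCCESS)) {
    return false;
  }
  return true;
}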


parser/common/parser_fp16_t.cc (+7, -7)

@@ -500,8 +500,8 @@ static uint16_t Fp16AddCalVal(uint16_t s_ret, int16_t e_ret, uint16_t m_ret, uin
bool b_last_bit = ((m_ret & 1) > 0);
bool b_trunc_high = 0;
bool b_trunc_left = 0;
b_trunc_high = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32AbsMax) > 0);
b_trunc_high = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32AbsMax) > 0);
m_ret = ManRoundToNearest(b_last_bit, b_trunc_high, b_trunc_left, m_ret, shift_out);
while (m_ret >= m_max) {
m_ret = m_ret >> 1;
@@ -623,8 +623,8 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) {
bool b_last_bit = ((mul_m & 1) > 0);
bool b_trunc_high = 0;
bool b_trunc_left = 0;
b_trunc_high = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32AbsMax) > 0);
b_trunc_high = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32AbsMax) > 0);
mul_m = ManRoundToNearest(b_last_bit, b_trunc_high, b_trunc_left, mul_m);

while (mul_m >= m_max || e_ret < 0) {
@@ -966,7 +966,7 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u
bool b_last_bit = ((m_tmp & 1) > 0);
bool b_trunc_high = 0;
bool b_trunc_left = 0;
if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc
if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc
b_trunc_high = ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = ((m_trunc & kFp32AbsMax) > 0);
}
@@ -1025,7 +1025,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) {
bool b_last_bit = ((m_ret & 1) > 0);
bool b_trunc_high = 0;
bool b_trunc_left = 0;
if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc
if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc
b_trunc_high = ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = ((m_trunc & kFp32AbsMax) > 0);
}
@@ -1069,7 +1069,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u
bool b_last_bit = ((m_tmp & 1) > 0);
bool b_trunc_high = 0;
bool b_trunc_left = 0;
if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc
if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc
b_trunc_high = ((m_trunc & kFp32SignMask) > 0);
b_trunc_left = ((m_trunc & kFp32AbsMax) > 0);
}


parser/common/parser_types.cc (+1, -1)

@@ -16,7 +16,7 @@
#include "framework/omg/parser/parser_types.h"


namespace ge{
namespace ge {
namespace parser {
const char *DATA = "Data";
const char *AIPPDATA = "AippData";


parser/common/proto_file_parser.cc (+2, -2)

@@ -117,7 +117,7 @@ string GetMessageName(const std::string &line) {
string CreatTmpName(int len) {
std::uniform_int_distribution<int> u(kMinRandomNum, kMaxRandomNum);
std::default_random_engine e;
e.seed(time(0));
e.seed(time(nullptr));
string tmp_name = "";
for (int i = 0; i < len; i++) {
tmp_name += std::to_string(u(e));
@@ -200,7 +200,7 @@ Status ProtoFileParser::CreatProtoFile() {

Status ProtoFileParser::ParseProtoFile(const string &proto_file,
std::map<int, std::pair<string, string>> &identifier_op_map,
std::map<std::string, std::pair<int, string>> &op_identifier_map) {
std::map<std::string, std::pair<int, string>> &op_identifier_map) const {
ifstream read_file;
read_file.open(proto_file, std::ios::in);
if (read_file.fail()) {
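
The first proto_file_parser.cc hunk above replaces the literal 0 with nullptr in the time() call that seeds the random engine. A sketch of that seeding pattern, with a hypothetical digit range in place of the file's kMinRandomNum/kMaxRandomNum constants:

#include <ctime>
#include <random>
#include <string>

// Hypothetical helper mirroring the seeding pattern; the 0..9 range is an assumption.
static std::string MakeTmpSuffix(int len) {
  std::uniform_int_distribution<int> dist(0, 9);
  std::default_random_engine engine;
  engine.seed(static_cast<unsigned int>(std::time(nullptr)));  // was time(0) before this commit
  std::string suffix;
  for (int i = 0; i < len; ++i) {
    suffix += std::to_string(dist(engine));
  }
  return suffix;
}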


parser/common/proto_file_parser.h (+1, -1)

@@ -34,7 +34,7 @@ private:
Status CreatProtoFile();
Status ParseProtoFile(const std::string &proto_file,
std::map<int, std::pair<std::string, std::string> > &identifier_op_map,
std::map<std::string, std::pair<int, std::string> > &op_identifier_map);
std::map<std::string, std::pair<int, std::string> > &op_identifier_map) const;
Status WriteCaffeProtoFile(const char *custom_proto_file, std::ifstream &read_caffe,
std::ofstream &write_tmp) const;
Status WriteProtoFile(const char *caffe_proto_file, const char *custom_proto_file);


parser/common/prototype_pass_manager.cc (+1, -1)

@@ -25,7 +25,7 @@ ProtoTypePassManager &ProtoTypePassManager::Instance() {
return instance;
}

Status ProtoTypePassManager::Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) {
Status ProtoTypePassManager::Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) const {
GE_CHECK_NOTNULL(message);
const auto &pass_vec = ProtoTypePassRegistry::GetInstance().GetCreateFnByType(fmk_type);
for (const auto &pass_item : pass_vec) {


parser/common/prototype_pass_manager.h (+1, -1)

@@ -24,7 +24,7 @@ class ProtoTypePassManager {
public:
static ProtoTypePassManager &Instance();

Status Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type);
Status Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) const;

~ProtoTypePassManager() = default;



parser/onnx/onnx_custom_parser_adapter.cc (+1, -1)

@@ -44,7 +44,7 @@ Status OnnxCustomParserAdapter::ParseParams(const Message *op_src, ge::Operator
return SUCCESS;
}

Status OnnxCustomParserAdapter::ParseParams(const Operator &op_src, Operator &op_dest) {
Status OnnxCustomParserAdapter::ParseParams(const Operator &op_src, Operator &op_dest) const {
ParseParamByOpFunc custom_op_parser = domi::OpRegistry::Instance()->GetParseParamByOperatorFunc(
ParserUtils::GetOperatorType(op_src));
GE_CHECK_NOTNULL(custom_op_parser);


parser/onnx/onnx_custom_parser_adapter.h (+1, -1)

@@ -29,7 +29,7 @@ class PARSER_FUNC_VISIBILITY OnnxCustomParserAdapter : public OnnxOpParser {
/// @return FAILED parse failed
Status ParseParams(const Message *op_src, ge::Operator &op_dest) override;

Status ParseParams(const Operator &op_src, Operator &op_dest);
Status ParseParams(const Operator &op_src, Operator &op_dest) const;
};
} // namespace ge



parser/onnx/onnx_parser.cc (+9, -9)

@@ -140,7 +140,7 @@ graphStatus aclgrphParseONNXFromMem(const char *buffer, size_t size,
}

// parse caffe model_file to GE graph
ge::graphStatus ret = model_parser->ParseFromMemory(buffer, (uint32_t)size, graph);
ge::graphStatus ret = model_parser->ParseFromMemory(buffer, static_cast<uint32_t>(size), graph);
if (ret != ge::SUCCESS) {
REPORT_CALL_ERROR("E19999", "ParseFromMemory failed");
GELOGE(ret, "[Parser][Graph] %s failed.", ParserUtils::GetGraphName(graph).c_str());
@@ -344,7 +344,7 @@ Status OnnxModelParser::ParseInput(const std::map<std::string, ge::onnx::TensorP
}

Status OnnxModelParser::ParseInitializer(ge::onnx::GraphProto &onnx_graph,
std::map<std::string, ge::onnx::TensorProto> &initializer_name_tensor) {
std::map<std::string, ge::onnx::TensorProto> &initializer_name_tensor) const {
// Construct const node for weight
int index = 0;
for (auto it : initializer_name_tensor) {
@@ -362,7 +362,7 @@ Status OnnxModelParser::ParseInitializer(ge::onnx::GraphProto &onnx_graph,
return SUCCESS;
}

void OnnxModelParser::UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) {
void OnnxModelParser::UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) const {
int index = 0;
for (int i = 0; i < onnx_graph.node_size(); i++) {
ge::onnx::NodeProto *node = onnx_graph.mutable_node(i);
@@ -443,7 +443,7 @@ Status OnnxModelParser::AdapterOpType(const ge::onnx::NodeProto *node_proto, std
}

Status OnnxModelParser::TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op,
const string &op_type) {
const string &op_type) const {
GE_CHECK_NOTNULL(node_proto);
string node_name = node_proto->name();
op = ge::OperatorFactory::CreateOperator(node_name.c_str(), op_type.c_str());
@@ -560,7 +560,7 @@ Status OnnxModelParser::Prechecker(ge::onnx::GraphProto &onnx_graph) {
}

Status OnnxModelParser::ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op,
std::shared_ptr<OpParser> &op_parser) {
std::shared_ptr<OpParser> &op_parser) const {
GE_CHECK_NOTNULL(node_proto);
GE_CHECK_NOTNULL(op_parser);
std::string op_type = node_proto->op_type();
@@ -707,7 +707,7 @@ Status OnnxModelParser::GetGraphOutputs(std::vector<std::pair<Operator, std::vec
return SUCCESS;
}

Status OnnxModelParser::GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model) {
Status OnnxModelParser::GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model) const {
GE_CHECK_NOTNULL(file);
GELOGI("File path is %s.", file);

@@ -720,7 +720,7 @@ Status OnnxModelParser::GetModelFromFile(const char *file, ge::onnx::ModelProto
return SUCCESS;
}

Status OnnxModelParser::GetModelFromMemory(const char *data, uint32_t size, ge::onnx::ModelProto &onnx_model) {
Status OnnxModelParser::GetModelFromMemory(const char *data, uint32_t size, ge::onnx::ModelProto &onnx_model) const {
GE_CHECK_NOTNULL(data);

// 1. Get graph from onnx model file.
@@ -741,7 +741,7 @@ void OnnxModelParser::ClearMembers() {
}

Status OnnxModelParser::AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx_graph,
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph) {
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph) const {
std::queue<ge::onnx::GraphProto *> onnx_graph_tasks;
int index = 0;
onnx_graph_tasks.push(&root_onnx_graph);
@@ -1040,7 +1040,7 @@ ge::DataType OnnxModelParser::ConvertToGeDataType(const uint32_t type) {
return ge::OnnxUtil::ConvertOnnxDataType(type);
}

void OnnxModelParser::UpdateDataFormat(ge::Graph &graph) {
void OnnxModelParser::UpdateDataFormat(ge::Graph &graph) const {
for (GNode &gn : graph.GetDirectNode()) {
AscendString type;
(void)gn.GetType(type);
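
The first onnx_parser.cc hunk above (and the loop bounds in tensorflow_parser.cc further down) replaces a C-style cast with static_cast, making the narrowing conversion explicit. A minimal sketch with a hypothetical consumer standing in for ParseFromMemory:

#include <cstdint>
#include <vector>

// Hypothetical consumer that takes a 32-bit size, standing in for ParseFromMemory.
static void ConsumeBuffer(const char *buffer, uint32_t size) {
  (void)buffer;
  (void)size;
}

static void ForwardBuffer(const std::vector<char> &buf) {
  // Before: ConsumeBuffer(buf.data(), (uint32_t)buf.size());
  // After:  the conversion is spelled out with static_cast.
  ConsumeBuffer(buf.data(), static_cast<uint32_t>(buf.size()));
}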


parser/onnx/onnx_parser.h (+9, -8)

@@ -90,15 +90,15 @@ class PARSER_FUNC_VISIBILITY OnnxModelParser : public domi::ModelParser {
Status ParseOutput(ge::onnx::GraphProto &onnx_graph);

Status ParseInitializer(ge::onnx::GraphProto &onnx_graph,
std::map<std::string, ge::onnx::TensorProto> &initializer_name_tensor);
std::map<std::string, ge::onnx::TensorProto> &initializer_name_tensor) const;

void UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph);
void UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) const;

Status ConstructOriType(const ge::onnx::NodeProto *node_proto, std::string &ori_type);

Status AdapterOpType(const ge::onnx::NodeProto *node_proto, std::string &ori_type, std::string &om_type);

Status TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op, const string &op_type);
Status TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op, const string &op_type) const;

Status ConstructInputOutputContext(const ge::onnx::NodeProto *node_proto);

@@ -111,22 +111,23 @@ class PARSER_FUNC_VISIBILITY OnnxModelParser : public domi::ModelParser {

Status Prechecker(ge::onnx::GraphProto &onnx_graph);
Status GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model);
Status GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model) const;

Status GetModelFromMemory(const char *data, uint32_t size, ge::onnx::ModelProto &onnx_model);
Status GetModelFromMemory(const char *data, uint32_t size, ge::onnx::ModelProto &onnx_model) const;

Status ModelParseToGraph(const ge::onnx::ModelProto &onnx_model, ge::Graph &graph);

Status ModelParseToGraphImpl(bool is_subgraph, ge::onnx::GraphProto &onnx_graph, ge::Graph &graph);

void UpdateDataFormat(ge::Graph &graph);
void UpdateDataFormat(ge::Graph &graph) const;

void ClearMembers();

Status ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op, std::shared_ptr<OpParser> &op_parser);
Status ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op,
std::shared_ptr<OpParser> &op_parser) const;

Status AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx_graph,
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph);
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph) const;

Status SetOutputsInfo(const ParserUtils::OutputMapping &final_output_nodes,
const ParserUtils::OutputMapping &tensor_to_nodes);


parser/onnx/subgraph_adapter/if_subgraph_adapter.cc (+3, -3)

@@ -90,7 +90,7 @@ domi::Status IfSubgraphAdapter::ParseIfNodeSubgraphs(
}

domi::Status IfSubgraphAdapter::GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph,
std::set<std::string> &all_inputs) {
std::set<std::string> &all_inputs) const {
std::set<std::string> graph_inputs;
std::set<std::string> graph_outputs;
for (int i = 0; i < onnx_graph.node_size(); i++) {
@@ -115,7 +115,7 @@ domi::Status IfSubgraphAdapter::GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx
}

void IfSubgraphAdapter::AddInputNodeForGraph(const std::set<std::string> &all_inputs,
ge::onnx::GraphProto &onnx_graph) {
ge::onnx::GraphProto &onnx_graph) const {
for (const auto &input_name : all_inputs) {
ge::onnx::ValueInfoProto *value_info = onnx_graph.add_input();
value_info->set_name(input_name);
@@ -123,7 +123,7 @@ void IfSubgraphAdapter::AddInputNodeForGraph(const std::set<std::string> &all_in
}

void IfSubgraphAdapter::AddInputForParentNode(const std::set<std::string> &all_inputs,
ge::onnx::NodeProto &parent_node) {
ge::onnx::NodeProto &parent_node) const {
for (const auto &input_name : all_inputs) {
parent_node.add_input(input_name);
}


parser/onnx/subgraph_adapter/if_subgraph_adapter.h (+3, -3)

@@ -31,9 +31,9 @@ class PARSER_FUNC_VISIBILITY IfSubgraphAdapter : public SubgraphAdapter {
private:
domi::Status ParseIfNodeSubgraphs(ge::onnx::NodeProto *parent_node, std::vector<ge::onnx::GraphProto *> &onnx_graphs,
std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph);
domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set<std::string> &all_inputs);
void AddInputNodeForGraph(const std::set<std::string> &all_inputs, ge::onnx::GraphProto &onnx_graph);
void AddInputForParentNode(const std::set<std::string> &all_inputs, ge::onnx::NodeProto &parent_node);
domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set<std::string> &all_inputs) const;
void AddInputNodeForGraph(const std::set<std::string> &all_inputs, ge::onnx::GraphProto &onnx_graph) const;
void AddInputForParentNode(const std::set<std::string> &all_inputs, ge::onnx::NodeProto &parent_node) const;
};
} // namespace ge



parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc (+1, -1)

@@ -17,7 +17,7 @@
#include "subgraph_adapter_factory.h"
#include "framework/common/debug/ge_log.h"

namespace ge{
namespace ge {
SubgraphAdapterFactory* SubgraphAdapterFactory::Instance() {
static SubgraphAdapterFactory instance;
return &instance;


parser/tensorflow/graph_functiondef.cc (+1, -1)

@@ -78,7 +78,7 @@ string NameMapHelper::UniqueNodeName(const string &name) {

string NameMapHelper::Renormalize(const string &name) const {
const auto iter = name_mapping_.find(name);
if (iter == name_mapping_.end()) return string();
if (iter == name_mapping_.end()) {return string();}
return iter->second;
}



parser/tensorflow/graph_optimizer.cc (+3, -3)

@@ -301,7 +301,7 @@ Status ParserGraphOptimizer::InsertNode(ge::ComputeGraphPtr sub_graph, vector<ge
vector<ge::NodePtr>::iterator iter = find(nodes.begin(), nodes.end(), peer_in_anchor->GetOwnerNode());
GE_IF_BOOL_EXEC(iter == nodes.end(), output_in_map[out_anchor].emplace_back(peer_in_anchor); hasOutNode = true);
}
GE_IF_BOOL_EXEC(hasOutNode == true, output_anchors.emplace_back(out_anchor));
GE_IF_BOOL_EXEC(hasOutNode, output_anchors.emplace_back(out_anchor));
}

InControlAnchorPtr node_in_control = node->GetInControlAnchor();
@@ -381,7 +381,7 @@ Status ParserGraphOptimizer::RebuildOutputAnchors(vector<ge::OutDataAnchorPtr> &
GE_CHK_BOOL_EXEC(fusion_op_desc->AddOutputDesc(src_out_desc) == ge::GRAPH_SUCCESS, return FAILED);

ge::DataType data_type = src_out_desc.GetDataType();
auto iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
std::map<int32_t, int32_t>::const_iterator iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
GE_IF_BOOL_EXEC(
iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(),
REPORT_INNER_ERROR("E19999", "datatype:%d of output:%d in node:%s:%s is not supported",
@@ -417,7 +417,7 @@ Status ParserGraphOptimizer::RebuildInputAnchors(vector<ge::InDataAnchorPtr> &in
return FAILED,
"Add fusion_op_desc AddInputDesc failed");
ge::DataType data_type = tensorDescPtr->GetDataType();
auto iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
std::map<int32_t, int32_t>::const_iterator iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type);
GE_IF_BOOL_EXEC(
iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(),
REPORT_INNER_ERROR("E19999", "datatype:%d of input:%d in node:%s:%s is not supported",


parser/tensorflow/tensorflow_arg_parser.cc (+2, -2)

@@ -37,8 +37,8 @@ Status ParseParams(const Message *op_src, ArgOpOperator *const op) {
domi::tensorflow::AttrValue output_attr_value;
if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, output_attr_value)) {
GE_CHK_STATUS_RET(
TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG),
"trans output_attr_value failed, op: %s", node->name().c_str());
TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG),
"trans output_attr_value failed, op: %s", node->name().c_str());
// For the needs of the Data operator, copy the output description to the input description
GE_CHK_STATUS_RET(TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG),
"trans output_attr_value failed, op: %s", node->name().c_str());


parser/tensorflow/tensorflow_frameworkop_parser.cc (+4, -4)

@@ -40,8 +40,8 @@ Status ParseParams(const Message *op_src, FrameworkOpOperator *op) {
domi::tensorflow::AttrValue output_attr_value;
if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_INPUT_TENSOR_DESC, input_attr_value)) {
GE_CHK_STATUS_RET(
TensorFlowUtil::TransTensorDescriptor(input_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG, type),
"trans input_attr_value failed, op: %s", node->name().c_str());
TensorFlowUtil::TransTensorDescriptor(input_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG, type),
"trans input_attr_value failed, op: %s", node->name().c_str());
} else {
GELOGD("Frameworkop has no input tensor desc, name:%s, type:%s.", node->name().c_str(), type.c_str());
/// _Retval constructed from inference function do not has input_tensor_dec
@@ -53,8 +53,8 @@ Status ParseParams(const Message *op_src, FrameworkOpOperator *op) {
}
if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, output_attr_value)) {
GE_CHK_STATUS_RET(
TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG, type),
"trans output_attr_value failed, op: %s", node->name().c_str());
TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG, type),
"trans output_attr_value failed, op: %s", node->name().c_str());
} else {
GELOGD("Frameworkop has no output tensor desc, name:%s, type:%s.", node->name().c_str(), type.c_str());
}


parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc (+1, -1)

@@ -25,7 +25,7 @@ using domi::FusionParseParamByOpFunc;

namespace ge {
Status TensorFlowFusionCustomParserAdapter::ParseParams(const vector<const NodeDef *> &v_input_const,
ge::NodePtr &node) {
ge::NodePtr &node) const {
GE_CHECK_NOTNULL(node);
auto op_dest = node->GetOpDesc();
GE_CHECK_NOTNULL(op_dest);


parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h (+1, -1)

@@ -31,7 +31,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionCustomParserAdapter : public Tensor
* @return FAILED parse failed
* @author
*/
Status ParseParams(const vector<const NodeDef *> &v_input_const, ge::NodePtr &node) override;
Status ParseParams(const vector<const NodeDef *> &v_input_const, ge::NodePtr &node) const override;

/**
* @ingroup domi_parser


parser/tensorflow/tensorflow_fusion_op_parser.cc (+1, -1)

@@ -75,7 +75,7 @@ Status TensorFlowFusionOpParser::GetTensorFromNode(const NodeDef *node_def, Tens
return SUCCESS;
}

Status TensorFlowFusionOpParser::ParseParams(const std::vector<const NodeDef *> &v_input_const, NodePtr &op_dest) {
Status TensorFlowFusionOpParser::ParseParams(const std::vector<const NodeDef *> &v_input_const, NodePtr &op_dest) const {
(void)v_input_const;
(void)op_dest;
return SUCCESS;


parser/tensorflow/tensorflow_fusion_op_parser.h (+1, -1)

@@ -44,7 +44,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionOpParser : public TensorFlowOpParse
* @return SUCCESS Parsing success
* @return FAILED Parsing failed
*/
virtual Status ParseParams(const std::vector<const NodeDef *> &v_input_const, ge::NodePtr &node);
virtual Status ParseParams(const std::vector<const NodeDef *> &v_input_const, ge::NodePtr &node) const;

/**
* @ingroup domi_omg


parser/tensorflow/tensorflow_parser.cc (+14, -14)

@@ -494,7 +494,7 @@ Status TensorFlowModelParser::AddNode(const domi::tensorflow::NodeDef *node_def,
// node is released in destructor
string node_name = node_def->name();
string node_op = node_def->op();
auto type_it = tensorflow_op_map.find(node_op);
std::map<std::string, std::string>::const_iterator type_it = tensorflow_op_map.find(node_op);
if (type_it == tensorflow_op_map.end()) {
GELOGI("Can not find,maybe this node has no plugin node_name is %s, node_op is %s ", node_name.c_str(),
node_op.c_str());
@@ -553,7 +553,7 @@ Status TensorFlowModelParser::AddNode(const domi::tensorflow::NodeDef *node_def,
shared_ptr<OpParser> fusion_op_parser = factory->CreateFusionOpParser(op_type);
GE_CHECK_NOTNULL(fusion_op_parser);
// Find all children of the fusion operator
auto iter = fusion_op_nodedef_map_.find(node_def->name());
std::map<string, vector<const NodeDef *>>::const_iterator iter = fusion_op_nodedef_map_.find(node_def->name());
if (iter == fusion_op_nodedef_map_.end()) {
REPORT_INNER_ERROR("E19999", "FusionOp node %s has no children node, check invalid", node_name.c_str());
GELOGE(FAILED, "FusionOp node %s has no children node!", node_name.c_str());
@@ -756,7 +756,7 @@ Status TensorFlowModelParser::AddEdges(ge::ComputeGraphPtr &graph) {
}
// Find that the output of the source node is equal to the destination node
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> &dest_input_map = dest_iter->second.input_map;
auto input_iter = dest_input_map.find(src_op_name);
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>>::const_iterator input_iter = dest_input_map.find(src_op_name);
// Find output and input
if (input_iter == dest_input_map.end()) {
continue;
@@ -919,7 +919,7 @@ Status TensorFlowModelParser::ParseNodeDef(TensorFlowModelParser *parser, ge::Co
return AddScopeInnerNode(parser, graph, graphMutex, node_def);
}

auto iterator = parser->adaptedOpTypeMap_.find(node_name);
std::map<std::string, std::string>::const_iterator iterator = parser->adaptedOpTypeMap_.find(node_name);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
iterator == parser->adaptedOpTypeMap_.end(),
REPORT_INNER_ERROR("E19999", "get adapted op type failed, node name = %s", node_name.c_str());
@@ -1374,7 +1374,7 @@ Status TensorFlowModelParser::Parse(const char *model_path, ge::ComputeGraphPtr
}
}

auto iter = function_name_to_graphdef.find(arg.function_name);
std::map<std::string, domi::tensorflow::GraphDef>::const_iterator iter = function_name_to_graphdef.find(arg.function_name);
if (iter == function_name_to_graphdef.end()) {
ErrorManager::GetInstance().ATCReportErrMessage("E12013", {"functionname"}, {arg.function_name});
GELOGE(FAILED, "Failed to get subgraph by function name %s", arg.function_name.c_str());
@@ -1866,7 +1866,7 @@ Status TensorFlowModelParser::UpdateAllNodeOpContext(shared_ptr<ge::ScopeGraph>
ge::ScopeFusionOpInfo info;
if (IsFusionOpChild(op_node_name, &info) && nodedef_map_[op_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
// This node is a fusion operator
auto fusion_iter = tmp_fusion_op_node_context_map.find(info.fusion_node_name);
std::map<std::string, OpNodeContext>::const_iterator fusion_iter = tmp_fusion_op_node_context_map.find(info.fusion_node_name);
if (fusion_iter == tmp_fusion_op_node_context_map.end()) {
OpNodeContext op_node_context;
tmp_fusion_op_node_context_map[info.fusion_node_name] = op_node_context;
@@ -2116,7 +2116,7 @@ Status TensorFlowModelParser::NormalizeInputOrOutputMap(
}

string name = to_string(pair.first) + ":" + to_string(pair.second);
auto compare_iter = compare_set.find(name);
std::set<std::string>::const_iterator compare_iter = compare_set.find(name);
if (compare_iter != compare_set.end()) {
// pair<from,to> repeat, ignore
continue;
@@ -2155,7 +2155,7 @@ void TensorFlowModelParser::SaveEdgesControlInfo(const string &node_name, const
}

void TensorFlowModelParser::UpdateEdgesControlInfo(const ge::ScopeFusionOpInfo &info) {
auto iter = edges_control_map.find(info.node_name);
std::map<std::string, std::vector<int32_t>>::const_iterator iter = edges_control_map.find(info.node_name);
if (iter != edges_control_map.end()) {
// Delete the original fusion operator node information and add the fusion operator control edge information
edges_control_map.erase(iter);
@@ -2479,7 +2479,7 @@ Status TensorFlowModelParser::OptimizeIdentityByOutput(map<string, NodeDef *> &n
return INTERNAL_ERROR, "Can't find op node context.");
OpNodeContext op_node_context = context_iter->second;

auto node_def_iter = nodedef_map.find(curr_node_name);
std::map<std::string, NodeDef *>::const_iterator node_def_iter = nodedef_map.find(curr_node_name);
GE_CHK_BOOL_TRUE_EXEC_WITH_LOG(
(node_def_iter == nodedef_map.end()),
REPORT_INNER_ERROR("E19999", "Node:%s can't find in nodedef_map, check invalid", curr_node_name.c_str());
@@ -2809,7 +2809,7 @@ Status GetTransposeInfo(GraphDef *graph_def, std::map<std::string, std::string>

Status EraseTransposeNode(std::map<std::string, std::string> &softmaxInfo,
std::map<std::string, DelTransposeInfo> &transposeInfo) {
auto itTranspose = transposeInfo.begin();
std::map<std::string, DelTransposeInfo>::const_iterator itTranspose = transposeInfo.begin();
for (; itTranspose != transposeInfo.end();) {
// transpose --> softmax
bool bErase = true;
@@ -3144,7 +3144,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
const ge::ParserContext &ctx = ge::GetParserContext();
std::map<std::string, std::vector<int64_t>> input_dims = ctx.input_dims;
std::vector<int64_t> designated_dims = input_dims.at(node.name());
for (int32_t i = 0; i < (int32_t)designated_dims.size(); i++) {
for (int32_t i = 0; i < static_cast<int32_t>(designated_dims.size()); i++) {
data_shape->add_dim()->set_size(designated_dims[i]);
}
google::protobuf::Map<std::string, domi::tensorflow::AttrValue> *attr = placeholder_node.mutable_attr();
@@ -3217,7 +3217,7 @@ Status TensorFlowModelParser::TrimGraphByOutput(const domi::tensorflow::GraphDef
const ge::ParserContext &ctx = ge::GetParserContext();
std::map<std::string, std::vector<int64_t>> input_dims = ctx.input_dims;
std::vector<int64_t> designated_dims = input_dims.at(node.name());
for (int32_t i = 0; i < (int32_t)designated_dims.size(); i++) {
for (int32_t i = 0; i < static_cast<int32_t>(designated_dims.size()); i++) {
data_shape->add_dim()->set_size(designated_dims[i]);
}
google::protobuf::Map<std::string, domi::tensorflow::AttrValue> *attr = placeholder_node.mutable_attr();
@@ -3425,7 +3425,7 @@ Status TensorFlowModelParser::OptimizeConstNodes4CustomOp(domi::tensorflow::Grap
Status TensorFlowModelParser::AddControlEdgeAfterRemoveInputs(domi::tensorflow::GraphDef *graph_def,
domi::tensorflow::NodeDef *node_def,
const map<string, NodeDef *> &all_node_map,
const vector<string> &removed_inputs_vec) {
const vector<string> &removed_inputs_vec) const {
GE_CHECK_NOTNULL(graph_def);
GE_CHECK_NOTNULL(node_def);
for (const auto &remove_input : removed_inputs_vec) {
@@ -3513,7 +3513,7 @@ Status TensorFlowModelParser::RemoveInputs(domi::tensorflow::GraphDef *graph_def
}

void TensorFlowModelParser::RemoveInputAttr(domi::tensorflow::NodeDef *node_def,
const map<string, vector<int>> &remove_inputs_map) {
const map<string, vector<int>> &remove_inputs_map) const {
// The caller guarantees that the pointer is not null
auto *inputs = node_def->mutable_input();
google::protobuf::Map<std::string, domi::tensorflow::AttrValue> *attr_map = node_def->mutable_attr();
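
Several of the tensorflow_parser.cc hunks above (and the two in graph_optimizer.cc) spell out the const_iterator type of a map lookup instead of using auto. A minimal sketch of that style, using a hypothetical map rather than one of the parser's members:

#include <map>
#include <string>

static bool LookUpOpType(const std::map<std::string, std::string> &op_map,
                         const std::string &node_op, std::string &op_type) {
  // Before: auto type_it = op_map.find(node_op);
  // After:  the iterator type is written out explicitly.
  std::map<std::string, std::string>::const_iterator type_it = op_map.find(node_op);
  if (type_it == op_map.end()) {
    return false;
  }
  op_type = type_it->second;
  return true;
}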


parser/tensorflow/tensorflow_parser.h (+2, -2)

@@ -570,9 +570,9 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser {
Status AddControlEdgeAfterRemoveInputs(domi::tensorflow::GraphDef *graph_def,
domi::tensorflow::NodeDef *node_def,
const map<string, NodeDef *> &all_node_map,
const vector<string> &removed_inputs_vec);
const vector<string> &removed_inputs_vec) const;

void RemoveInputAttr(domi::tensorflow::NodeDef *node_def, const map<string, vector<int>> &remove_inputs_map);
void RemoveInputAttr(domi::tensorflow::NodeDef *node_def, const map<string, vector<int>> &remove_inputs_map) const;

/**
* @ingroup domi_omg


parser/tensorflow/tensorflow_reshape_parser.cc (+2, -2)

@@ -74,11 +74,11 @@ Status TensorFlowReshapeParser::ParseParams(const Message *op_src, ge::OpDescPtr
ge::GeTensorDesc output_desc;

if (TensorFlowUtil::FindAttrValue(node_src, ge::ATTR_NAME_INPUT_TENSOR_DESC, input_attr_value)) {
GE_CHK_BOOL_RET_STATUS(SUCCESS == ParseDesc(input_attr_value, input_desc), FAILED, "parse input desc failed");
GE_CHK_BOOL_RET_STATUS(ParseDesc(input_attr_value, input_desc) == SUCCESS, FAILED, "parse input desc failed");
}

if (TensorFlowUtil::FindAttrValue(node_src, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, output_attr_value)) {
GE_CHK_BOOL_RET_STATUS(SUCCESS == ParseDesc(output_attr_value, output_desc), FAILED,
GE_CHK_BOOL_RET_STATUS(ParseDesc(output_attr_value, output_desc) == SUCCESS, FAILED,
"parse output desc failed");
}


