diff --git a/parser/caffe/caffe_data_parser.cc b/parser/caffe/caffe_data_parser.cc index f3b4453..87d9679 100644 --- a/parser/caffe/caffe_data_parser.cc +++ b/parser/caffe/caffe_data_parser.cc @@ -29,7 +29,7 @@ using domi::CAFFE; namespace ge { Status CaffeDataParser::GetOutputDesc(const string &name, const std::vector &input_dims, - const ge::OpDescPtr &op) { + const ge::OpDescPtr &op) const { GE_CHECK_NOTNULL(op); GELOGI("The input dim size is %zu in layer %s.", input_dims.size(), name.c_str()); diff --git a/parser/caffe/caffe_data_parser.h b/parser/caffe/caffe_data_parser.h index 26e046e..b592f50 100644 --- a/parser/caffe/caffe_data_parser.h +++ b/parser/caffe/caffe_data_parser.h @@ -45,7 +45,7 @@ class PARSER_FUNC_VISIBILITY CaffeDataParser : public CaffeOpParser, public Data * @return SUCCESS parse successfully * @return FAILED parse failed */ - Status GetOutputDesc(const std::string &name, const std::vector &input_dims, const ge::OpDescPtr &op); + Status GetOutputDesc(const std::string &name, const std::vector &input_dims, const ge::OpDescPtr &op) const; // caffe data layer type could be type of `Input` or `DummyData` Status ParseParamsForInput(const domi::caffe::LayerParameter *layer, ge::OpDescPtr &op); diff --git a/parser/caffe/caffe_parser.cc b/parser/caffe/caffe_parser.cc index a362763..1ef1e29 100644 --- a/parser/caffe/caffe_parser.cc +++ b/parser/caffe/caffe_parser.cc @@ -512,7 +512,7 @@ Status CaffeModelParser::ReadModelWithoutWarning(const char *model_path, google: return SUCCESS; } -Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) { +Status CaffeModelParser::ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) const { GE_CHECK_NOTNULL(model_path); GE_CHECK_NOTNULL(message); GELOGI("Start to read model file: %s.", model_path); @@ -586,7 +586,7 @@ Status CaffeModelParser::ParseLayerParameter(const google::protobuf::Descriptor } Status 
CaffeModelParser::CreateCustomOperator(string op_name, string op_type, const google::protobuf::Message *message, - int index, vector &operators) { + int index, vector &operators) const { if (op_name.empty() || op_type.empty()) { REPORT_INNER_ERROR("E19999", "[Check][Param]Name or type of layer is empty, name: %s, type: %s.", op_name.c_str(), op_type.c_str()); @@ -616,7 +616,7 @@ Status CaffeModelParser::CreateCustomOperator(string op_name, string op_type, co return SUCCESS; } -void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_index) { +void CaffeModelParser::AddOutputInfoToContext(string layer_name, int32_t top_index) const { auto iter_node_name = ge::GetParserContext().out_nodes_map.find(layer_name); if (iter_node_name != ge::GetParserContext().out_nodes_map.end()) { iter_node_name->second.emplace_back(top_index); @@ -705,7 +705,7 @@ Status CaffeModelParser::AddBlobsToMap(const domi::caffe::LayerParameter &layer, return SUCCESS; } -bool CaffeModelParser::IsOpAttrEmpty(const ge::Operator &op, const std::string &type) { +bool CaffeModelParser::IsOpAttrEmpty(const ge::Operator &op, const std::string &type) const { std::map attrs; (void)op.GetAllAttrNamesAndTypes(attrs); @@ -899,7 +899,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C return SUCCESS; } -Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) { +Status CaffeModelParser::AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const { GE_CHECK_NOTNULL(op_desc); // Data node input and output tensordesc added in parserparam if (op_desc->GetType() == ge::parser::DATA) { @@ -1069,7 +1069,7 @@ Status CaffeModelParser::AddEdges(ge::ComputeGraphPtr &graph) { return SUCCESS; } -bool CaffeModelParser::IsOutputTop(const string &op_name, const int32_t index) { +bool CaffeModelParser::IsOutputTop(const string &op_name, const int32_t index) const { bool ret = false; 
auto iter = ge::GetParserContext().out_nodes_map.find(op_name); if (iter != ge::GetParserContext().out_nodes_map.end()) { @@ -1169,7 +1169,7 @@ Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_mes return SUCCESS; } -bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer) { +bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer) const { if (layer.include_size() != 0) { bool filter_flag = false; for (int32_t j = 0; j < layer.include_size(); j++) { @@ -1189,7 +1189,7 @@ bool CaffeModelParser::CheckValidLayer(const domi::caffe::LayerParameter &layer) return true; } -bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) { +bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) const { for (auto &bottom_name : layer.bottom()) { if (top_name == bottom_name) { return true; @@ -1199,7 +1199,7 @@ bool CaffeModelParser::IsInplaceTopBlob(const domi::caffe::LayerParameter &layer } std::string CaffeModelParser::RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name, - int index) { + int index) const { return (top_name + "_" + layer.name() + "_" + std::to_string(index)); } @@ -1294,7 +1294,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.", layer.name().c_str(), layer.type().c_str()); - CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && (input_data_flag == true)), has_error = true; + CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && input_data_flag), has_error = true; REPORT_INNER_ERROR("E19999", "net %s has input and data layer simultaneously, check invalid." 
"layer name:%s, layer type:%s", proto_message.name().c_str(), layer.name().c_str(), layer.type().c_str()); @@ -1516,7 +1516,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.", layer.name().c_str(), layer.type().c_str()); - CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && (input_data_flag == true)), has_error = true; + CHECK_FALSE_EXEC(!((layer.type() == ge::parser::DATA_TYPE) && input_data_flag), has_error = true; GELOGE(FAILED, "[Check][Layer]net %s has input and data layer simultaneously, check invalid." "layer name:%s, layer type:%s", proto_message.name().c_str(), layer.name().c_str(), layer.type().c_str())); @@ -1591,7 +1591,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap return SUCCESS; } -Status CaffeModelParser::FindShareParamLayers(const std::map> &layer_params_map) { +Status CaffeModelParser::FindShareParamLayers(const std::map> &layer_params_map) const { for (auto p_iter = layer_params_map.begin(); p_iter != layer_params_map.end(); ++p_iter) { for (auto p2_iter = p_iter; p2_iter != layer_params_map.end(); ++p2_iter) { if (p_iter->first != p2_iter->first && p_iter->second == p2_iter->second) { @@ -1625,7 +1625,7 @@ Status CaffeModelParser::ToJson(const char *model_file, const char *json_file) { return ModelSaver::SaveJsonToFile(json_file, j); } -Status CaffeModelParser::ReorderInput(domi::caffe::NetParameter &net) { +Status CaffeModelParser::ReorderInput(domi::caffe::NetParameter &net) const { int layer_size = net.layer_size(); for (int i = 0; i < layer_size; ++i) { domi::caffe::LayerParameter *layer = net.mutable_layer(i); @@ -2018,7 +2018,7 @@ Status CaffeWeightsParser::ConvertBlobsProto(const google::protobuf::Message *me } Status CaffeWeightsParser::ConvertBlobShapeProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message) { + google::protobuf::Message 
*dest_message) const { const google::protobuf::Reflection *reflection = message->GetReflection(); CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message"); vector field_desc; @@ -2040,7 +2040,7 @@ Status CaffeWeightsParser::ConvertBlobShapeProto(const google::protobuf::Message } Status CaffeWeightsParser::ConvertConvParamProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message) { + google::protobuf::Message *dest_message) const { const google::protobuf::Reflection *reflection = message->GetReflection(); CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message"); vector field_desc; @@ -2060,7 +2060,7 @@ Status CaffeWeightsParser::ConvertConvParamProto(const google::protobuf::Message } Status CaffeWeightsParser::ConvertInnerProdcutProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message) { + google::protobuf::Message *dest_message) const { const google::protobuf::Reflection *reflection = message->GetReflection(); CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message"); vector field_desc; @@ -2079,7 +2079,7 @@ Status CaffeWeightsParser::ConvertInnerProdcutProto(const google::protobuf::Mess return SUCCESS; } -Status CaffeWeightsParser::CheckLayersSize(const google::protobuf::Message *message) { +Status CaffeWeightsParser::CheckLayersSize(const google::protobuf::Message *message) const { const google::protobuf::Reflection *reflection = message->GetReflection(); CAFFE_CHECK_NULL_AND_REPROT_ERRORMSG(reflection, "Get Reflection failed in google::protobuf::Message"); vector field_desc; diff --git a/parser/caffe/caffe_parser.h b/parser/caffe/caffe_parser.h index a7f0d39..36bf244 100644 --- a/parser/caffe/caffe_parser.h +++ b/parser/caffe/caffe_parser.h @@ -56,17 +56,17 @@ static std::map, std::vector> params_share class PARSER_FUNC_VISIBILITY CaffeModelParser : public 
domi::ModelParser { public: CaffeModelParser() {} - virtual ~CaffeModelParser() {} + virtual ~CaffeModelParser() override {} /** * @ingroup domi_omg * @brief Parse the relevant data from the model file and save it to graph - * @param [in] file Path of model file + * @param [in] model_path Path of model file * @param [in|out] graph graph for saving model information * @return SUCCESS parse successfully * @return FAILED parse failed */ - Status Parse(const char *file, ge::Graph &graph) override; + Status Parse(const char *model_path, ge::Graph &graph) override; /** * @ingroup domi_omg @@ -124,7 +124,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { } private: - Status Parse(const char *file, ge::ComputeGraphPtr &graph); + Status Parse(const char *model_path, ge::ComputeGraphPtr &graph); /** * @ingroup domi_omg @@ -190,7 +190,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @return SUCCESS read file successfully * @return FAILED read file failed */ - Status ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message); + Status ReadCaffeModelFromText(const char *model_path, google::protobuf::Message *message) const; /* * @ingroup domi_omg @@ -216,7 +216,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @return FAILED create operator failed */ Status CreateCustomOperator(std::string op_name, std::string op_type, const google::protobuf::Message *message, - int index, std::vector &operators); + int index, std::vector &operators) const; /** * @ingroup domi_omg * @brief Add blob information to the bottom_blobs_map and top_blobs_map_ @@ -259,7 +259,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @return true valid * @return false invalid */ - bool CheckValidLayer(const domi::caffe::LayerParameter &layer); + bool CheckValidLayer(const domi::caffe::LayerParameter &layer) const; /** * @ingroup domi_omg @@ -267,7 +267,7 @@ class 
PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @return true is 'Inplace' * @return false not is 'Inplace' */ - bool IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name); + bool IsInplaceTopBlob(const domi::caffe::LayerParameter &layer, const std::string &top_name) const; /** * @ingroup domi_omg @@ -275,7 +275,7 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @return true yes * @return false no */ - bool IsOutputTop(const string &op_name, int32_t index); + bool IsOutputTop(const string &op_name, const int32_t index) const; /** * @ingroup domi_omg @@ -284,29 +284,30 @@ class PARSER_FUNC_VISIBILITY CaffeModelParser : public domi::ModelParser { * @param [in|out] Layer set of the same param * @return Status */ - Status FindShareParamLayers(const std::map> &); + Status FindShareParamLayers(const std::map> &layer_params_map) const; - Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer); + Status AddTensorDescToOpDesc(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer) const; Status AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const domi::caffe::LayerParameter &layer, const string &op_type); Status AddUserOutNodesTop(); - std::string RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name, int index); + std::string RemapTopNameByLayer(const domi::caffe::LayerParameter &layer, const std::string &top_name, + int index) const; Status GetCustomOp(const domi::caffe::LayerParameter &layer, vector &operators); - bool IsOpAttrEmpty(const ge::Operator &op, const std::string &type); + bool IsOpAttrEmpty(const ge::Operator &op, const std::string &type) const; Status ParseOpParam(const domi::caffe::LayerParameter &layer, ge::OpDescPtr &op, std::shared_ptr &op_parser); void SaveOrigionLayerTops(domi::caffe::LayerParameter &layer); - Status ReorderInput(domi::caffe::NetParameter &net); + Status 
ReorderInput(domi::caffe::NetParameter &net) const; - void AddOutputInfoToContext(string layer_name, int32_t top_index); + void AddOutputInfoToContext(string layer_name, int32_t top_index) const; Status ParseOutputNodeTopInfo(const domi::caffe::NetParameter &proto_message); @@ -357,8 +358,8 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser { Status Parse(const char *file, ge::ComputeGraphPtr &graph); - Status ParseWeightByFusionProto(const char *model_path, const string &custom_proto_path, - const string &custom_proto_name, ge::ComputeGraphPtr &graph); + Status ParseWeightByFusionProto(const char *weight_path, const string &fusion_proto_path, + const string &fusion_proto_name, ge::ComputeGraphPtr &graph); Status ParseLayerParameter(const google::protobuf::Descriptor *layer_descriptor, const google::protobuf::Message *message, @@ -367,7 +368,7 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser { Status ConvertLayerParameter(const google::protobuf::Message *layer_message, ge::ComputeGraphPtr &graph); - Status CheckLayersSize(const google::protobuf::Message *message); + Status CheckLayersSize(const google::protobuf::Message *message) const; Status ConvertLayerProto(const google::protobuf::Message *message, google::protobuf::Message *layer); @@ -381,13 +382,13 @@ class PARSER_FUNC_VISIBILITY CaffeWeightsParser : public domi::WeightsParser { google::protobuf::Message *blobs); Status ConvertBlobShapeProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message); + google::protobuf::Message *dest_message) const; Status ConvertInnerProdcutProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message); + google::protobuf::Message *dest_message) const; Status ConvertConvParamProto(const google::protobuf::Message *message, - google::protobuf::Message *dest_message); + google::protobuf::Message *dest_message) const; /** * @ingroup domi_omg * @brief Layer types to 
be ignored in weight resolution diff --git a/parser/common/acl_graph_parser_util.cc b/parser/common/acl_graph_parser_util.cc index 32603b2..e1ca2ae 100644 --- a/parser/common/acl_graph_parser_util.cc +++ b/parser/common/acl_graph_parser_util.cc @@ -341,7 +341,7 @@ domi::Status AclGrphParseUtil::ParseAclOutputNodes(const string &out_nodes) { return SUCCESS; } -domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_output_fp16) { +domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_output_fp16) const { if (is_output_fp16.empty()) { return SUCCESS; } @@ -365,7 +365,7 @@ domi::Status AclGrphParseUtil::ParseAclOutputFp16NodesFormat(const string &is_ou return SUCCESS; } -domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fusion_passes) { +domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fusion_passes) const { ge::GetParserContext().enable_scope_fusion_passes.clear(); if (enable_scope_fusion_passes.empty()) { return SUCCESS; @@ -387,7 +387,7 @@ void AclGrphParseUtil::AddAttrsForInputNodes(const vector &adjust_fp16_f } domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes, - const string &is_input_adjust_hw_layout) { + const string &is_input_adjust_hw_layout) const { GE_CHECK_NOTNULL(graph); vector adjust_fp16_format_vec; if (!is_input_adjust_hw_layout.empty()) { @@ -430,7 +430,7 @@ domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &gra } void AclGrphParseUtil::CreateOutputNodesInfo(std::vector> &output_nodes_info, - std::vector &output_nodes_name) { + std::vector &output_nodes_name) const { output_nodes_name.clear(); auto &out_tensor_names = ge::GetParserContext().out_tensor_names; if (out_tensor_names.empty()) { @@ -462,7 +462,7 @@ void AclGrphParseUtil::CreateOutputNodesInfo(std::vector> &output_nodes_info) { + std::vector> &output_nodes_info) const { ge::OpDescPtr tmpDescPtr = 
node->GetOpDesc(); if (tmpDescPtr == nullptr) { REPORT_INNER_ERROR("E19999", "param node has no opdesc."); @@ -576,7 +576,7 @@ domi::Status AclGrphParseUtil::SetOutputNodeInfo(ge::Graph &graph, return domi::SUCCESS; } -domi::Status AclGrphParseUtil::CheckOptions(const std::map &parser_params) { +domi::Status AclGrphParseUtil::CheckOptions(const std::map &parser_params) const { for (auto &ele : parser_params) { const char *key_ascend = ele.first.GetString(); if (key_ascend == nullptr) { diff --git a/parser/common/acl_graph_parser_util.h b/parser/common/acl_graph_parser_util.h index abc65a6..4c63c7e 100644 --- a/parser/common/acl_graph_parser_util.h +++ b/parser/common/acl_graph_parser_util.h @@ -48,18 +48,18 @@ class AclGrphParseUtil { private: bool parser_initialized = false; - domi::Status CheckOptions(const std::map &parser_params); - domi::Status GetOutputLeaf(NodePtr node, std::vector> &output_nodes_info); + domi::Status CheckOptions(const std::map &parser_params) const; + domi::Status GetOutputLeaf(NodePtr node, std::vector> &output_nodes_info) const; void CreateOutputNodesInfo(std::vector> &output_nodes_info, - std::vector &output_nodes_name); + std::vector &output_nodes_name) const; static void SetDefaultFormat(); domi::Status ParseAclOutputNodes(const std::string &out_nodes); - domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16); - domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes); + domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16) const; + domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes) const; static void AddAttrsForInputNodes(const vector &adjust_fp16_format_vec, const string &fp16_nodes_name, size_t index, OpDescPtr &op_desc); domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes, - const string &is_input_adjust_hw_layout); + const string &is_input_adjust_hw_layout) const; domi::Status 
GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph, std::vector> &output_nodes_info); }; diff --git a/parser/common/model_saver.cc b/parser/common/model_saver.cc index 08a7f82..0102848 100644 --- a/parser/common/model_saver.cc +++ b/parser/common/model_saver.cc @@ -36,7 +36,7 @@ const uint32_t kInteval = 2; FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY Status ModelSaver::SaveJsonToFile(const char *file_path, const Json &model) { Status ret = SUCCESS; - if (file_path == nullptr || SUCCESS != CheckPath(file_path)) { + if ((file_path == nullptr) || (CheckPath(file_path) != SUCCESS)) { REPORT_INNER_ERROR("E19999", "param file_path is nullptr or checkpath not return success"); GELOGE(FAILED, "[Check][Param]Check output file failed."); return FAILED; diff --git a/parser/common/parser_fp16_t.cc b/parser/common/parser_fp16_t.cc index 560d1c1..43306e6 100644 --- a/parser/common/parser_fp16_t.cc +++ b/parser/common/parser_fp16_t.cc @@ -500,8 +500,8 @@ static uint16_t Fp16AddCalVal(uint16_t s_ret, int16_t e_ret, uint16_t m_ret, uin bool b_last_bit = ((m_ret & 1) > 0); bool b_trunc_high = 0; bool b_trunc_left = 0; - b_trunc_high = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32SignMask) > 0); - b_trunc_left = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32AbsMax) > 0); + b_trunc_high = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32SignMask) > 0); + b_trunc_left = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32AbsMax) > 0); m_ret = ManRoundToNearest(b_last_bit, b_trunc_high, b_trunc_left, m_ret, shift_out); while (m_ret >= m_max) { m_ret = m_ret >> 1; @@ -623,8 +623,8 @@ static uint16_t Fp16Mul(uint16_t v_1, uint16_t v_2) { bool b_last_bit = ((mul_m & 1) > 0); bool b_trunc_high = 0; bool b_trunc_left = 0; - b_trunc_high = (TagFp16RoundMode::kRoundToNearest == g_round_mode) && ((m_trunc & kFp32SignMask) > 0); - b_trunc_left = (TagFp16RoundMode::kRoundToNearest == 
g_round_mode) && ((m_trunc & kFp32AbsMax) > 0); + b_trunc_high = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32SignMask) > 0); + b_trunc_left = (g_round_mode == TagFp16RoundMode::kRoundToNearest) && ((m_trunc & kFp32AbsMax) > 0); mul_m = ManRoundToNearest(b_last_bit, b_trunc_high, b_trunc_left, mul_m); while (mul_m >= m_max || e_ret < 0) { @@ -966,7 +966,7 @@ static void SetValByUint16Val(const uint16_t &input_val, const uint16_t &sign, u bool b_last_bit = ((m_tmp & 1) > 0); bool b_trunc_high = 0; bool b_trunc_left = 0; - if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc + if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc b_trunc_high = ((m_trunc & kFp32SignMask) > 0); b_trunc_left = ((m_trunc & kFp32AbsMax) > 0); } @@ -1025,7 +1025,7 @@ fp16_t &fp16_t::operator=(const uint16_t &ui_val) { bool b_last_bit = ((m_ret & 1) > 0); bool b_trunc_high = 0; bool b_trunc_left = 0; - if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc + if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc b_trunc_high = ((m_trunc & kFp32SignMask) > 0); b_trunc_left = ((m_trunc & kFp32AbsMax) > 0); } @@ -1069,7 +1069,7 @@ static void SetValByUint32Val(const uint32_t &input_val, const uint16_t &sign, u bool b_last_bit = ((m_tmp & 1) > 0); bool b_trunc_high = 0; bool b_trunc_left = 0; - if (TagFp16RoundMode::kRoundToNearest == g_round_mode) { // trunc + if (g_round_mode == TagFp16RoundMode::kRoundToNearest) { // trunc b_trunc_high = ((m_trunc & kFp32SignMask) > 0); b_trunc_left = ((m_trunc & kFp32AbsMax) > 0); } diff --git a/parser/common/parser_types.cc b/parser/common/parser_types.cc index b53d37a..f35cc6d 100644 --- a/parser/common/parser_types.cc +++ b/parser/common/parser_types.cc @@ -16,7 +16,7 @@ #include "framework/omg/parser/parser_types.h" -namespace ge{ +namespace ge { namespace parser { const char *DATA = "Data"; const char *AIPPDATA = "AippData"; diff --git a/parser/common/proto_file_parser.cc 
b/parser/common/proto_file_parser.cc index 747c8d0..f47e1a6 100644 --- a/parser/common/proto_file_parser.cc +++ b/parser/common/proto_file_parser.cc @@ -117,7 +117,7 @@ string GetMessageName(const std::string &line) { string CreatTmpName(int len) { std::uniform_int_distribution u(kMinRandomNum, kMaxRandomNum); std::default_random_engine e; - e.seed(time(0)); + e.seed(time(nullptr)); string tmp_name = ""; for (int i = 0; i < len; i++) { tmp_name += std::to_string(u(e)); @@ -200,7 +200,7 @@ Status ProtoFileParser::CreatProtoFile() { Status ProtoFileParser::ParseProtoFile(const string &proto_file, std::map> &identifier_op_map, - std::map> &op_identifier_map) { + std::map> &op_identifier_map) const { ifstream read_file; read_file.open(proto_file, std::ios::in); if (read_file.fail()) { diff --git a/parser/common/proto_file_parser.h b/parser/common/proto_file_parser.h index 517de37..55f32a5 100644 --- a/parser/common/proto_file_parser.h +++ b/parser/common/proto_file_parser.h @@ -34,7 +34,7 @@ private: Status CreatProtoFile(); Status ParseProtoFile(const std::string &proto_file, std::map > &identifier_op_map, - std::map > &op_identifier_map); + std::map > &op_identifier_map) const; Status WriteCaffeProtoFile(const char *custom_proto_file, std::ifstream &read_caffe, std::ofstream &write_tmp) const; Status WriteProtoFile(const char *caffe_proto_file, const char *custom_proto_file); diff --git a/parser/common/prototype_pass_manager.cc b/parser/common/prototype_pass_manager.cc index ce4a099..b54702c 100644 --- a/parser/common/prototype_pass_manager.cc +++ b/parser/common/prototype_pass_manager.cc @@ -25,7 +25,7 @@ ProtoTypePassManager &ProtoTypePassManager::Instance() { return instance; } -Status ProtoTypePassManager::Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) { +Status ProtoTypePassManager::Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) const { GE_CHECK_NOTNULL(message); const auto &pass_vec = 
ProtoTypePassRegistry::GetInstance().GetCreateFnByType(fmk_type); for (const auto &pass_item : pass_vec) { diff --git a/parser/common/prototype_pass_manager.h b/parser/common/prototype_pass_manager.h index d9c2f9f..028ffbe 100644 --- a/parser/common/prototype_pass_manager.h +++ b/parser/common/prototype_pass_manager.h @@ -24,7 +24,7 @@ class ProtoTypePassManager { public: static ProtoTypePassManager &Instance(); - Status Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type); + Status Run(google::protobuf::Message *message, const domi::FrameworkType &fmk_type) const; ~ProtoTypePassManager() = default; diff --git a/parser/onnx/onnx_custom_parser_adapter.cc b/parser/onnx/onnx_custom_parser_adapter.cc index ba7d4cc..0ba1b4d 100644 --- a/parser/onnx/onnx_custom_parser_adapter.cc +++ b/parser/onnx/onnx_custom_parser_adapter.cc @@ -44,7 +44,7 @@ Status OnnxCustomParserAdapter::ParseParams(const Message *op_src, ge::Operator return SUCCESS; } -Status OnnxCustomParserAdapter::ParseParams(const Operator &op_src, Operator &op_dest) { +Status OnnxCustomParserAdapter::ParseParams(const Operator &op_src, Operator &op_dest) const { ParseParamByOpFunc custom_op_parser = domi::OpRegistry::Instance()->GetParseParamByOperatorFunc( ParserUtils::GetOperatorType(op_src)); GE_CHECK_NOTNULL(custom_op_parser); diff --git a/parser/onnx/onnx_custom_parser_adapter.h b/parser/onnx/onnx_custom_parser_adapter.h index 7e0fb06..8ac1df9 100644 --- a/parser/onnx/onnx_custom_parser_adapter.h +++ b/parser/onnx/onnx_custom_parser_adapter.h @@ -29,7 +29,7 @@ class PARSER_FUNC_VISIBILITY OnnxCustomParserAdapter : public OnnxOpParser { /// @return FAILED parse failed Status ParseParams(const Message *op_src, ge::Operator &op_dest) override; - Status ParseParams(const Operator &op_src, Operator &op_dest); + Status ParseParams(const Operator &op_src, Operator &op_dest) const; }; } // namespace ge diff --git a/parser/onnx/onnx_parser.cc b/parser/onnx/onnx_parser.cc index 
4fce687..6c07e48 100644 --- a/parser/onnx/onnx_parser.cc +++ b/parser/onnx/onnx_parser.cc @@ -140,7 +140,7 @@ graphStatus aclgrphParseONNXFromMem(const char *buffer, size_t size, } // parse caffe model_file to GE graph - ge::graphStatus ret = model_parser->ParseFromMemory(buffer, (uint32_t)size, graph); + ge::graphStatus ret = model_parser->ParseFromMemory(buffer, static_cast(size), graph); if (ret != ge::SUCCESS) { REPORT_CALL_ERROR("E19999", "ParseFromMemory failed"); GELOGE(ret, "[Parser][Graph] %s failed.", ParserUtils::GetGraphName(graph).c_str()); @@ -344,7 +344,7 @@ Status OnnxModelParser::ParseInput(const std::map &initializer_name_tensor) { + std::map &initializer_name_tensor) const { // Construct const node for weight int index = 0; for (auto it : initializer_name_tensor) { @@ -362,7 +362,7 @@ Status OnnxModelParser::ParseInitializer(ge::onnx::GraphProto &onnx_graph, return SUCCESS; } -void OnnxModelParser::UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) { +void OnnxModelParser::UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) const { int index = 0; for (int i = 0; i < onnx_graph.node_size(); i++) { ge::onnx::NodeProto *node = onnx_graph.mutable_node(i); @@ -443,7 +443,7 @@ Status OnnxModelParser::AdapterOpType(const ge::onnx::NodeProto *node_proto, std } Status OnnxModelParser::TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op, - const string &op_type) { + const string &op_type) const { GE_CHECK_NOTNULL(node_proto); string node_name = node_proto->name(); op = ge::OperatorFactory::CreateOperator(node_name.c_str(), op_type.c_str()); @@ -560,7 +560,7 @@ Status OnnxModelParser::Prechecker(ge::onnx::GraphProto &onnx_graph) { } Status OnnxModelParser::ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op, - std::shared_ptr &op_parser) { + std::shared_ptr &op_parser) const { GE_CHECK_NOTNULL(node_proto); GE_CHECK_NOTNULL(op_parser); std::string op_type = node_proto->op_type(); @@ -707,7 +707,7 @@ Status 
OnnxModelParser::GetGraphOutputs(std::vector &name_to_onnx_graph) { + std::map &name_to_onnx_graph) const { std::queue onnx_graph_tasks; int index = 0; onnx_graph_tasks.push(&root_onnx_graph); @@ -1040,7 +1040,7 @@ ge::DataType OnnxModelParser::ConvertToGeDataType(const uint32_t type) { return ge::OnnxUtil::ConvertOnnxDataType(type); } -void OnnxModelParser::UpdateDataFormat(ge::Graph &graph) { +void OnnxModelParser::UpdateDataFormat(ge::Graph &graph) const { for (GNode &gn : graph.GetDirectNode()) { AscendString type; (void)gn.GetType(type); diff --git a/parser/onnx/onnx_parser.h b/parser/onnx/onnx_parser.h index 862c4e5..acec764 100644 --- a/parser/onnx/onnx_parser.h +++ b/parser/onnx/onnx_parser.h @@ -90,15 +90,15 @@ class PARSER_FUNC_VISIBILITY OnnxModelParser : public domi::ModelParser { Status ParseOutput(ge::onnx::GraphProto &onnx_graph); Status ParseInitializer(ge::onnx::GraphProto &onnx_graph, - std::map &initializer_name_tensor); + std::map &initializer_name_tensor) const; - void UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph); + void UpdateAllNodeName(ge::onnx::GraphProto &onnx_graph) const; Status ConstructOriType(const ge::onnx::NodeProto *node_proto, std::string &ori_type); Status AdapterOpType(const ge::onnx::NodeProto *node_proto, std::string &ori_type, std::string &om_type); - Status TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op, const string &op_type); + Status TransNodeToOperator(const ge::onnx::NodeProto *node_proto, ge::Operator &op, const string &op_type) const; Status ConstructInputOutputContext(const ge::onnx::NodeProto *node_proto); @@ -111,22 +111,23 @@ class PARSER_FUNC_VISIBILITY OnnxModelParser : public domi::ModelParser { Status Prechecker(ge::onnx::GraphProto &onnx_graph); - Status GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model); + Status GetModelFromFile(const char *file, ge::onnx::ModelProto &onnx_model) const; - Status GetModelFromMemory(const char *data, uint32_t size, 
ge::onnx::ModelProto &onnx_model); + Status GetModelFromMemory(const char *data, uint32_t size, ge::onnx::ModelProto &onnx_model) const; Status ModelParseToGraph(const ge::onnx::ModelProto &onnx_model, ge::Graph &graph); Status ModelParseToGraphImpl(bool is_subgraph, ge::onnx::GraphProto &onnx_graph, ge::Graph &graph); - void UpdateDataFormat(ge::Graph &graph); + void UpdateDataFormat(ge::Graph &graph) const; void ClearMembers(); - Status ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op, std::shared_ptr &op_parser); + Status ParseOpParam(const ge::onnx::NodeProto *node_proto, ge::Operator &op, + std::shared_ptr &op_parser) const; Status AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx_graph, - std::map &name_to_onnx_graph); + std::map &name_to_onnx_graph) const; Status SetOutputsInfo(const ParserUtils::OutputMapping &final_output_nodes, const ParserUtils::OutputMapping &tensor_to_nodes); diff --git a/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc b/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc index e694f3b..50f8531 100644 --- a/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc +++ b/parser/onnx/subgraph_adapter/if_subgraph_adapter.cc @@ -90,7 +90,7 @@ domi::Status IfSubgraphAdapter::ParseIfNodeSubgraphs( } domi::Status IfSubgraphAdapter::GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, - std::set &all_inputs) { + std::set &all_inputs) const { std::set graph_inputs; std::set graph_outputs; for (int i = 0; i < onnx_graph.node_size(); i++) { @@ -115,7 +115,7 @@ domi::Status IfSubgraphAdapter::GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx } void IfSubgraphAdapter::AddInputNodeForGraph(const std::set &all_inputs, - ge::onnx::GraphProto &onnx_graph) { + ge::onnx::GraphProto &onnx_graph) const { for (const auto &input_name : all_inputs) { ge::onnx::ValueInfoProto *value_info = onnx_graph.add_input(); value_info->set_name(input_name); @@ -123,7 +123,7 @@ void IfSubgraphAdapter::AddInputNodeForGraph(const std::set &all_in 
} void IfSubgraphAdapter::AddInputForParentNode(const std::set<std::string> &all_inputs, - ge::onnx::NodeProto &parent_node) { + ge::onnx::NodeProto &parent_node) const { for (const auto &input_name : all_inputs) { parent_node.add_input(input_name); } diff --git a/parser/onnx/subgraph_adapter/if_subgraph_adapter.h b/parser/onnx/subgraph_adapter/if_subgraph_adapter.h index bcebd67..2723a86 100644 --- a/parser/onnx/subgraph_adapter/if_subgraph_adapter.h +++ b/parser/onnx/subgraph_adapter/if_subgraph_adapter.h @@ -31,9 +31,9 @@ class PARSER_FUNC_VISIBILITY IfSubgraphAdapter : public SubgraphAdapter { private: domi::Status ParseIfNodeSubgraphs(ge::onnx::NodeProto *parent_node, std::vector<ge::onnx::GraphProto *> &onnx_graphs, std::map<std::string, ge::onnx::GraphProto *> &name_to_onnx_graph); - domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set<std::string> &all_inputs); - void AddInputNodeForGraph(const std::set<std::string> &all_inputs, ge::onnx::GraphProto &onnx_graph); - void AddInputForParentNode(const std::set<std::string> &all_inputs, ge::onnx::NodeProto &parent_node); + domi::Status GetSubgraphsAllInputs(ge::onnx::GraphProto &onnx_graph, std::set<std::string> &all_inputs) const; + void AddInputNodeForGraph(const std::set<std::string> &all_inputs, ge::onnx::GraphProto &onnx_graph) const; + void AddInputForParentNode(const std::set<std::string> &all_inputs, ge::onnx::NodeProto &parent_node) const; }; } // namespace ge diff --git a/parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc b/parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc index 7632520..489018e 100644 --- a/parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc +++ b/parser/onnx/subgraph_adapter/subgraph_adapter_factory.cc @@ -17,7 +17,7 @@ #include "subgraph_adapter_factory.h" #include "framework/common/debug/ge_log.h" -namespace ge{ +namespace ge { SubgraphAdapterFactory* SubgraphAdapterFactory::Instance() { static SubgraphAdapterFactory instance; return &instance; } diff --git a/parser/tensorflow/graph_functiondef.cc b/parser/tensorflow/graph_functiondef.cc index 38589b4..2208c0d 100644 ---
a/parser/tensorflow/graph_functiondef.cc +++ b/parser/tensorflow/graph_functiondef.cc @@ -78,7 +78,7 @@ string NameMapHelper::UniqueNodeName(const string &name) { string NameMapHelper::Renormalize(const string &name) const { const auto iter = name_mapping_.find(name); - if (iter == name_mapping_.end()) return string(); + if (iter == name_mapping_.end()) {return string();} return iter->second; } diff --git a/parser/tensorflow/graph_optimizer.cc b/parser/tensorflow/graph_optimizer.cc index 9769b2a..95d9189 100644 --- a/parser/tensorflow/graph_optimizer.cc +++ b/parser/tensorflow/graph_optimizer.cc @@ -301,7 +301,7 @@ Status ParserGraphOptimizer::InsertNode(ge::ComputeGraphPtr sub_graph, vector::iterator iter = find(nodes.begin(), nodes.end(), peer_in_anchor->GetOwnerNode()); GE_IF_BOOL_EXEC(iter == nodes.end(), output_in_map[out_anchor].emplace_back(peer_in_anchor); hasOutNode = true); } - GE_IF_BOOL_EXEC(hasOutNode == true, output_anchors.emplace_back(out_anchor)); + GE_IF_BOOL_EXEC(hasOutNode, output_anchors.emplace_back(out_anchor)); } InControlAnchorPtr node_in_control = node->GetInControlAnchor(); @@ -381,7 +381,7 @@ Status ParserGraphOptimizer::RebuildOutputAnchors(vector & GE_CHK_BOOL_EXEC(fusion_op_desc->AddOutputDesc(src_out_desc) == ge::GRAPH_SUCCESS, return FAILED); ge::DataType data_type = src_out_desc.GetDataType(); - auto iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type); + std::map::const_iterator iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type); GE_IF_BOOL_EXEC( iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(), REPORT_INNER_ERROR("E19999", "datatype:%d of output:%d in node:%s:%s is not supported", @@ -417,7 +417,7 @@ Status ParserGraphOptimizer::RebuildInputAnchors(vector &in return FAILED, "Add fusion_op_desc AddInputDesc failed"); ge::DataType data_type = tensorDescPtr->GetDataType(); - auto iter = GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type); + std::map::const_iterator iter = 
GE_TENSORFLOW_DATA_TYPE_MAP.find((int32_t)data_type); GE_IF_BOOL_EXEC( iter == GE_TENSORFLOW_DATA_TYPE_MAP.end(), REPORT_INNER_ERROR("E19999", "datatype:%d of input:%d in node:%s:%s is not supported", diff --git a/parser/tensorflow/tensorflow_arg_parser.cc b/parser/tensorflow/tensorflow_arg_parser.cc index 577577a..f300bdc 100644 --- a/parser/tensorflow/tensorflow_arg_parser.cc +++ b/parser/tensorflow/tensorflow_arg_parser.cc @@ -37,8 +37,8 @@ Status ParseParams(const Message *op_src, ArgOpOperator *const op) { domi::tensorflow::AttrValue output_attr_value; if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, output_attr_value)) { GE_CHK_STATUS_RET( - TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG), - "trans output_attr_value failed, op: %s", node->name().c_str()); + TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG), + "trans output_attr_value failed, op: %s", node->name().c_str()); // For the needs of the Data operator, copy the output description to the input description GE_CHK_STATUS_RET(TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG), "trans output_attr_value failed, op: %s", node->name().c_str()); diff --git a/parser/tensorflow/tensorflow_frameworkop_parser.cc b/parser/tensorflow/tensorflow_frameworkop_parser.cc index 9c6eb71..1ccd725 100644 --- a/parser/tensorflow/tensorflow_frameworkop_parser.cc +++ b/parser/tensorflow/tensorflow_frameworkop_parser.cc @@ -40,8 +40,8 @@ Status ParseParams(const Message *op_src, FrameworkOpOperator *op) { domi::tensorflow::AttrValue output_attr_value; if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_INPUT_TENSOR_DESC, input_attr_value)) { GE_CHK_STATUS_RET( - TensorFlowUtil::TransTensorDescriptor(input_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG, type), - "trans input_attr_value failed, op: %s", node->name().c_str()); + 
TensorFlowUtil::TransTensorDescriptor(input_attr_value, op, TENSORFLOW_NORMAL_INPUT_TENSOR_FLAG, type), + "trans input_attr_value failed, op: %s", node->name().c_str()); } else { GELOGD("Frameworkop has no input tensor desc, name:%s, type:%s.", node->name().c_str(), type.c_str()); /// _Retval constructed from inference function do not has input_tensor_dec @@ -53,8 +53,8 @@ Status ParseParams(const Message *op_src, FrameworkOpOperator *op) { } if (TensorFlowUtil::FindAttrValue(node, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, output_attr_value)) { GE_CHK_STATUS_RET( - TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG, type), - "trans output_attr_value failed, op: %s", node->name().c_str()); + TensorFlowUtil::TransTensorDescriptor(output_attr_value, op, TENSORFLOW_NORMAL_OUTPUT_TENSOR_FLAG, type), + "trans output_attr_value failed, op: %s", node->name().c_str()); } else { GELOGD("Frameworkop has no output tensor desc, name:%s, type:%s.", node->name().c_str(), type.c_str()); } diff --git a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc index 0e5586b..dd27d55 100644 --- a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc +++ b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.cc @@ -25,7 +25,7 @@ using domi::FusionParseParamByOpFunc; namespace ge { Status TensorFlowFusionCustomParserAdapter::ParseParams(const vector &v_input_const, - ge::NodePtr &node) { + ge::NodePtr &node) const { GE_CHECK_NOTNULL(node); auto op_dest = node->GetOpDesc(); GE_CHECK_NOTNULL(op_dest); diff --git a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h index 2662af3..676ff64 100644 --- a/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h +++ b/parser/tensorflow/tensorflow_fusion_custom_parser_adapter.h @@ -31,7 +31,7 @@ class PARSER_FUNC_VISIBILITY 
TensorFlowFusionCustomParserAdapter : public Tensor * @return FAILED parse failed * @author */ - Status ParseParams(const vector &v_input_const, ge::NodePtr &node) override; + Status ParseParams(const vector &v_input_const, ge::NodePtr &node) const override; /** * @ingroup domi_parser diff --git a/parser/tensorflow/tensorflow_fusion_op_parser.cc b/parser/tensorflow/tensorflow_fusion_op_parser.cc index f9a344a..a67e94e 100644 --- a/parser/tensorflow/tensorflow_fusion_op_parser.cc +++ b/parser/tensorflow/tensorflow_fusion_op_parser.cc @@ -75,7 +75,7 @@ Status TensorFlowFusionOpParser::GetTensorFromNode(const NodeDef *node_def, Tens return SUCCESS; } -Status TensorFlowFusionOpParser::ParseParams(const std::vector &v_input_const, NodePtr &op_dest) { +Status TensorFlowFusionOpParser::ParseParams(const std::vector &v_input_const, NodePtr &op_dest) const { (void)v_input_const; (void)op_dest; return SUCCESS; diff --git a/parser/tensorflow/tensorflow_fusion_op_parser.h b/parser/tensorflow/tensorflow_fusion_op_parser.h index e98bba5..3aed4b2 100644 --- a/parser/tensorflow/tensorflow_fusion_op_parser.h +++ b/parser/tensorflow/tensorflow_fusion_op_parser.h @@ -44,7 +44,7 @@ class PARSER_FUNC_VISIBILITY TensorFlowFusionOpParser : public TensorFlowOpParse * @return SUCCESS Parsing success * @return FAILED Parsing failed */ - virtual Status ParseParams(const std::vector &v_input_const, ge::NodePtr &node); + virtual Status ParseParams(const std::vector &v_input_const, ge::NodePtr &node) const; /** * @ingroup domi_omg diff --git a/parser/tensorflow/tensorflow_parser.cc b/parser/tensorflow/tensorflow_parser.cc index 485a90e..1c18de2 100644 --- a/parser/tensorflow/tensorflow_parser.cc +++ b/parser/tensorflow/tensorflow_parser.cc @@ -494,7 +494,7 @@ Status TensorFlowModelParser::AddNode(const domi::tensorflow::NodeDef *node_def, // node is released in destructor string node_name = node_def->name(); string node_op = node_def->op(); - auto type_it = tensorflow_op_map.find(node_op); + 
std::map::const_iterator type_it = tensorflow_op_map.find(node_op); if (type_it == tensorflow_op_map.end()) { GELOGI("Can not find,maybe this node has no plugin node_name is %s, node_op is %s ", node_name.c_str(), node_op.c_str()); @@ -553,7 +553,7 @@ Status TensorFlowModelParser::AddNode(const domi::tensorflow::NodeDef *node_def, shared_ptr fusion_op_parser = factory->CreateFusionOpParser(op_type); GE_CHECK_NOTNULL(fusion_op_parser); // Find all children of the fusion operator - auto iter = fusion_op_nodedef_map_.find(node_def->name()); + std::map>::const_iterator iter = fusion_op_nodedef_map_.find(node_def->name()); if (iter == fusion_op_nodedef_map_.end()) { REPORT_INNER_ERROR("E19999", "FusionOp node %s has no children node, check invalid", node_name.c_str()); GELOGE(FAILED, "FusionOp node %s has no children node!", node_name.c_str()); @@ -756,7 +756,7 @@ Status TensorFlowModelParser::AddEdges(ge::ComputeGraphPtr &graph) { } // Find that the output of the source node is equal to the destination node std::map>> &dest_input_map = dest_iter->second.input_map; - auto input_iter = dest_input_map.find(src_op_name); + std::map>>::const_iterator input_iter = dest_input_map.find(src_op_name); // Find output and input if (input_iter == dest_input_map.end()) { continue; @@ -919,7 +919,7 @@ Status TensorFlowModelParser::ParseNodeDef(TensorFlowModelParser *parser, ge::Co return AddScopeInnerNode(parser, graph, graphMutex, node_def); } - auto iterator = parser->adaptedOpTypeMap_.find(node_name); + std::map::const_iterator iterator = parser->adaptedOpTypeMap_.find(node_name); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( iterator == parser->adaptedOpTypeMap_.end(), REPORT_INNER_ERROR("E19999", "get adapted op type failed, node name = %s", node_name.c_str()); @@ -1374,7 +1374,7 @@ Status TensorFlowModelParser::Parse(const char *model_path, ge::ComputeGraphPtr } } - auto iter = function_name_to_graphdef.find(arg.function_name); + std::map::const_iterator iter = 
function_name_to_graphdef.find(arg.function_name); if (iter == function_name_to_graphdef.end()) { ErrorManager::GetInstance().ATCReportErrMessage("E12013", {"functionname"}, {arg.function_name}); GELOGE(FAILED, "Failed to get subgraph by function name %s", arg.function_name.c_str()); @@ -1866,7 +1866,7 @@ Status TensorFlowModelParser::UpdateAllNodeOpContext(shared_ptr ge::ScopeFusionOpInfo info; if (IsFusionOpChild(op_node_name, &info) && nodedef_map_[op_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) { // This node is a fusion operator - auto fusion_iter = tmp_fusion_op_node_context_map.find(info.fusion_node_name); + std::map::const_iterator fusion_iter = tmp_fusion_op_node_context_map.find(info.fusion_node_name); if (fusion_iter == tmp_fusion_op_node_context_map.end()) { OpNodeContext op_node_context; tmp_fusion_op_node_context_map[info.fusion_node_name] = op_node_context; @@ -2116,7 +2116,7 @@ Status TensorFlowModelParser::NormalizeInputOrOutputMap( } string name = to_string(pair.first) + ":" + to_string(pair.second); - auto compare_iter = compare_set.find(name); + std::set::const_iterator compare_iter = compare_set.find(name); if (compare_iter != compare_set.end()) { // pair repeat, ignore continue; @@ -2155,7 +2155,7 @@ void TensorFlowModelParser::SaveEdgesControlInfo(const string &node_name, const } void TensorFlowModelParser::UpdateEdgesControlInfo(const ge::ScopeFusionOpInfo &info) { - auto iter = edges_control_map.find(info.node_name); + std::map>::const_iterator iter = edges_control_map.find(info.node_name); if (iter != edges_control_map.end()) { // Delete the original fusion operator node information and add the fusion operator control edge information edges_control_map.erase(iter); @@ -2479,7 +2479,7 @@ Status TensorFlowModelParser::OptimizeIdentityByOutput(map &n return INTERNAL_ERROR, "Can't find op node context."); OpNodeContext op_node_context = context_iter->second; - auto node_def_iter = nodedef_map.find(curr_node_name); + std::map::const_iterator 
node_def_iter = nodedef_map.find(curr_node_name); GE_CHK_BOOL_TRUE_EXEC_WITH_LOG( (node_def_iter == nodedef_map.end()), REPORT_INNER_ERROR("E19999", "Node:%s can't find in nodedef_map, check invalid", curr_node_name.c_str()); @@ -2809,7 +2809,7 @@ Status GetTransposeInfo(GraphDef *graph_def, std::map Status EraseTransposeNode(std::map &softmaxInfo, std::map &transposeInfo) { - auto itTranspose = transposeInfo.begin(); + std::map::const_iterator itTranspose = transposeInfo.begin(); for (; itTranspose != transposeInfo.end();) { // transpose --> softmax bool bErase = true; @@ -3144,7 +3144,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef const ge::ParserContext &ctx = ge::GetParserContext(); std::map> input_dims = ctx.input_dims; std::vector designated_dims = input_dims.at(node.name()); - for (int32_t i = 0; i < (int32_t)designated_dims.size(); i++) { + for (int32_t i = 0; i < static_cast(designated_dims.size()); i++) { data_shape->add_dim()->set_size(designated_dims[i]); } google::protobuf::Map *attr = placeholder_node.mutable_attr(); @@ -3217,7 +3217,7 @@ Status TensorFlowModelParser::TrimGraphByOutput(const domi::tensorflow::GraphDef const ge::ParserContext &ctx = ge::GetParserContext(); std::map> input_dims = ctx.input_dims; std::vector designated_dims = input_dims.at(node.name()); - for (int32_t i = 0; i < (int32_t)designated_dims.size(); i++) { + for (int32_t i = 0; i < static_cast(designated_dims.size()); i++) { data_shape->add_dim()->set_size(designated_dims[i]); } google::protobuf::Map *attr = placeholder_node.mutable_attr(); @@ -3425,7 +3425,7 @@ Status TensorFlowModelParser::OptimizeConstNodes4CustomOp(domi::tensorflow::Grap Status TensorFlowModelParser::AddControlEdgeAfterRemoveInputs(domi::tensorflow::GraphDef *graph_def, domi::tensorflow::NodeDef *node_def, const map &all_node_map, - const vector &removed_inputs_vec) { + const vector &removed_inputs_vec) const { GE_CHECK_NOTNULL(graph_def); GE_CHECK_NOTNULL(node_def); 
for (const auto &remove_input : removed_inputs_vec) { @@ -3513,7 +3513,7 @@ Status TensorFlowModelParser::RemoveInputs(domi::tensorflow::GraphDef *graph_def } void TensorFlowModelParser::RemoveInputAttr(domi::tensorflow::NodeDef *node_def, - const map> &remove_inputs_map) { + const map> &remove_inputs_map) const { // The caller guarantees that the pointer is not null auto *inputs = node_def->mutable_input(); google::protobuf::Map *attr_map = node_def->mutable_attr(); diff --git a/parser/tensorflow/tensorflow_parser.h b/parser/tensorflow/tensorflow_parser.h index 3e95a90..7f7124e 100644 --- a/parser/tensorflow/tensorflow_parser.h +++ b/parser/tensorflow/tensorflow_parser.h @@ -570,9 +570,9 @@ class PARSER_FUNC_VISIBILITY TensorFlowModelParser : public domi::ModelParser { Status AddControlEdgeAfterRemoveInputs(domi::tensorflow::GraphDef *graph_def, domi::tensorflow::NodeDef *node_def, const map &all_node_map, - const vector &removed_inputs_vec); + const vector &removed_inputs_vec) const; - void RemoveInputAttr(domi::tensorflow::NodeDef *node_def, const map> &remove_inputs_map); + void RemoveInputAttr(domi::tensorflow::NodeDef *node_def, const map> &remove_inputs_map) const; /** * @ingroup domi_omg diff --git a/parser/tensorflow/tensorflow_reshape_parser.cc b/parser/tensorflow/tensorflow_reshape_parser.cc index a3d5b1d..966a4ca 100644 --- a/parser/tensorflow/tensorflow_reshape_parser.cc +++ b/parser/tensorflow/tensorflow_reshape_parser.cc @@ -74,11 +74,11 @@ Status TensorFlowReshapeParser::ParseParams(const Message *op_src, ge::OpDescPtr ge::GeTensorDesc output_desc; if (TensorFlowUtil::FindAttrValue(node_src, ge::ATTR_NAME_INPUT_TENSOR_DESC, input_attr_value)) { - GE_CHK_BOOL_RET_STATUS(SUCCESS == ParseDesc(input_attr_value, input_desc), FAILED, "parse input desc failed"); + GE_CHK_BOOL_RET_STATUS(ParseDesc(input_attr_value, input_desc) == SUCCESS, FAILED, "parse input desc failed"); } if (TensorFlowUtil::FindAttrValue(node_src, ge::ATTR_NAME_OUTPUT_TENSOR_DESC, 
output_attr_value)) { - GE_CHK_BOOL_RET_STATUS(SUCCESS == ParseDesc(output_attr_value, output_desc), FAILED, + GE_CHK_BOOL_RET_STATUS(ParseDesc(output_attr_value, output_desc) == SUCCESS, FAILED, "parse output desc failed"); }