diff --git a/parser/caffe/caffe_custom_parser_adapter.cc b/parser/caffe/caffe_custom_parser_adapter.cc
index 891c217..9ea831a 100644
--- a/parser/caffe/caffe_custom_parser_adapter.cc
+++ b/parser/caffe/caffe_custom_parser_adapter.cc
@@ -85,7 +85,7 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
   GE_CHECK_NOTNULL(op);
   const LayerParameter *layer = reinterpret_cast<const LayerParameter *>(op_src);
-  GE_CHK_BOOL_RET_STATUS(nullptr != layer, FAILED, "[Convert][Type]Dynamic cast op_src to LayerParameter failed");
+  GE_CHK_BOOL_RET_STATUS(layer != nullptr, FAILED, "[Convert][Type]Dynamic cast op_src to LayerParameter failed");
   GELOGI("layer: %s blobs_size: %d bottom_size: %d", layer->name().c_str(), layer->blobs_size(), layer->bottom_size());
   if (layer->blobs_size() == 0) {
     return SUCCESS;
diff --git a/parser/caffe/caffe_parser.cc b/parser/caffe/caffe_parser.cc
index 1571d8b..51d01a5 100644
--- a/parser/caffe/caffe_parser.cc
+++ b/parser/caffe/caffe_parser.cc
@@ -874,8 +874,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C
   // AddConstInput is a function defined in caffe_op_parser, override in caffe_reshape_parser.
   std::shared_ptr<CaffeOpParser> caffe_op_parser = std::static_pointer_cast<CaffeOpParser>(op_parser);
   GE_CHECK_NOTNULL(caffe_op_parser);
-  Status status;
-  status = caffe_op_parser->AddConstInput(node);
+  Status status = caffe_op_parser->AddConstInput(node);
   if (status != SUCCESS) {
     REPORT_CALL_ERROR("E19999", "AddConstInput failed for node:%s", node->GetOpDesc()->GetName().c_str());
     GELOGE(FAILED, "[Add][ConstInput] to node %s fail.", node->GetOpDesc()->GetName().c_str());
@@ -2112,17 +2111,17 @@ Status CaffeWeightsParser::ConvertLayerParameter(const google::protobuf::Message
                                                  ge::ComputeGraphPtr &graph) {
   vector<string> need_share_layers;
   const domi::caffe::LayerParameter *layer = reinterpret_cast<const domi::caffe::LayerParameter *>(layer_message);
-  const string &layer_name = layer->name();
+  const string &shared_layer_name = layer->name();
   const string &layer_type = layer->type();
   for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
-    if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
-      GELOGI("layer:%s need share weights !", layer_name.c_str());
+    if (find(p_iter->second.begin(), p_iter->second.end(), shared_layer_name) != p_iter->second.end()) {
+      GELOGI("layer:%s need share weights !", shared_layer_name.c_str());
       need_share_layers = p_iter->second;
     }
   }
   if (need_share_layers.size() == 0) {
-    need_share_layers.push_back(layer_name);
+    need_share_layers.push_back(shared_layer_name);
   }
   for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
@@ -2229,27 +2228,27 @@ Status CaffeWeightsParser::ConvertNetParameter(const NetParameter &param, ge::Co
   for (int i = 0; i < num_layer; ++i) {
     const LayerParameter &layer = param.layer(i);
-    const string &layer_name = layer.name();
+    const string &param_layer_name = layer.name();
     // Skip some layer types
     if (skiped_layer_type_.find(layer.type()) != skiped_layer_type_.end()) {
-      GELOGI("Skip layer %s", layer_name.c_str());
+      GELOGI("Skip layer %s", param_layer_name.c_str());
       continue;
     }
-    GELOGI("Parse layer %s", layer_name.c_str());
+    GELOGI("Parse layer %s", param_layer_name.c_str());
     vector<string> need_share_layers;
     for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
-      if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
-        GELOGI("Layer: %s need share weights !", layer_name.c_str());
+      if (find(p_iter->second.begin(), p_iter->second.end(), param_layer_name) != p_iter->second.end()) {
+        GELOGI("Layer: %s need share weights !", param_layer_name.c_str());
         need_share_layers = p_iter->second;
       }
     }
     if (need_share_layers.size() == 0) {
-      need_share_layers.push_back(layer_name);
+      need_share_layers.push_back(param_layer_name);
     }
     for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
diff --git a/parser/common/parser_fp16_t.cc b/parser/common/parser_fp16_t.cc
index fe6af3e..560d1c1 100644
--- a/parser/common/parser_fp16_t.cc
+++ b/parser/common/parser_fp16_t.cc
@@ -675,8 +675,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
     uint64_t m_tmp;
     if (e_a > e_b) {
       m_tmp = m_a;
-      uint16_t tmp;
-      tmp = e_a - e_b;
+      uint16_t tmp = e_a - e_b;
       for (int i = 0; i < tmp; i++) {
         m_tmp = m_tmp << 1;
       }
@@ -690,8 +689,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
       m_b = m_tmp;
     }
     m_div = static_cast<uint16_t>(m_a * 1.0f / m_b);
-    fp16_t fp_div;
-    fp_div = m_div;
+    fp16_t fp_div = m_div;
     ret = fp_div.val;
     if (s_a != s_b) {
       ret |= kFp16SignMask;
diff --git a/parser/common/parser_utils.cc b/parser/common/parser_utils.cc
index 31dc518..d053a38 100644
--- a/parser/common/parser_utils.cc
+++ b/parser/common/parser_utils.cc
@@ -214,8 +214,7 @@ Status ParserUtils::HandleInputContext(const NodePtr &node,
       // add control edge
       if (node->GetInControlAnchor() != nullptr) {
         for (const auto &out_anchor : node->GetInControlAnchor()->GetPeerAnchors()) {
-          graphStatus ret = GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor());
-          if (ret != GRAPH_SUCCESS) {
+          if (GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor()) != GRAPH_SUCCESS) {
            REPORT_CALL_ERROR("E19999", "add control edge from %s to %s failed.",
                              out_anchor->GetOwnerNode()->GetName().c_str(),
                              peer_in_anchor->GetOwnerNode()->GetName().c_str());
diff --git a/parser/onnx/onnx_parser.cc b/parser/onnx/onnx_parser.cc
index 88e9ba9..4fce687 100644
--- a/parser/onnx/onnx_parser.cc
+++ b/parser/onnx/onnx_parser.cc
@@ -772,8 +772,8 @@ Status OnnxModelParser::AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx
     return FAILED;
   }
-  for (const auto &onnx_graph : onnx_graphs) {
-    onnx_graph_tasks.push(onnx_graph);
+  for (const auto &sub_onnx_graph : onnx_graphs) {
+    onnx_graph_tasks.push(sub_onnx_graph);
   }
   for (const auto &itr : name_to_onnx_subgraph) {
     name_to_onnx_graph.emplace(itr.first, itr.second);
diff --git a/parser/tensorflow/tensorflow_parser.cc b/parser/tensorflow/tensorflow_parser.cc
index 7ededa7..485a90e 100644
--- a/parser/tensorflow/tensorflow_parser.cc
+++ b/parser/tensorflow/tensorflow_parser.cc
@@ -1516,7 +1516,7 @@ Status TensorFlowModelParser::ParseAllGraph(const google::protobuf::Message *pro
     if (tensorflow_op_map.find(node_op) == tensorflow_op_map.end()) {
       GELOGW("%s not found in tensorflow_op_map.", node_op.c_str());
     }
-    Status ret = AddNode(node_def, graph, scope_graph);
+    ret = AddNode(node_def, graph, scope_graph);
     if (ret != SUCCESS) {
       GELOGE(ret, "Add op[%s] failed", node_def->name().c_str());
       DeleteFuisonNodeDef();
@@ -1676,7 +1676,6 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
     }
   }
-  int32_t tmp_index = 0;
   auto find = tmp_input_node_name.find(":");
   if (find == string::npos) {
     *node_name = tmp_input_node_name;
@@ -1684,7 +1683,7 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
     if (index == nullptr) {
       return SUCCESS;
     }
-    *index = tmp_index;
+    *index = 0;
     return SUCCESS;
   }
@@ -2011,15 +2010,13 @@ Status TensorFlowModelParser::EraseNormalOpOutputIfChild(shared_ptr<ge::ScopeGraph>
     string output_node_name = iter->first;
     ge::ScopeFusionOpInfo to_info;
-    int32_t from_index = 0;
-    int32_t to_index = 0;
-
     if (IsFusionOpChild(output_node_name, &to_info) &&
         nodedef_map_[output_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
       // Fuse operator, update index
       std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
+      int32_t to_index = 0;
       for (auto &pair : pairs) {
-        from_index = pair.first;
+        int32_t from_index = pair.first;
         GE_RETURN_WITH_LOG_IF_ERROR(GetInPutIndex(scope_graph, to_info, pair.second, to_index),
                                     "GetInPutIndex failed ,output_node_name %s.", output_node_name.c_str());
         tmp_output_map[to_info.fusion_node_name].push_back({from_index, to_index});
@@ -2048,15 +2045,13 @@ Status TensorFlowModelParser::UpdateNormalOpContext(shared_ptr<ge::ScopeGraph> &
   for (auto iter = normal_op_node_context.input_map.begin(); iter != normal_op_node_context.input_map.end();) {
     string input_node_name = iter->first;
     ge::ScopeFusionOpInfo from_info;
-    int32_t from_index = 0;
-    int32_t to_index = 0;
-
     if (IsFusionOpChild(input_node_name, &from_info) &&
         nodedef_map_[input_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
       // Fuse operator, update index
       std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
+      int32_t from_index = 0;
       for (auto &pair : pairs) {
-        to_index = pair.second;
+        int32_t to_index = pair.second;
         GE_RETURN_WITH_LOG_IF_ERROR(GetOutPutIndex(scope_graph, from_info, pair.first, from_index),
                                     "GetOutPutIndex failed ,input_node_name %s.", input_node_name.c_str());
         tmp_input_map[from_info.fusion_node_name].push_back({from_index, to_index});
@@ -2282,7 +2277,7 @@ Status TensorFlowModelParser::ParseProto(const google::protobuf::Message *proto,
     }
     // Do not exit immediately when there is an error, wait until all errors are collected before exiting
-    Status ret = AddFmkNodeDefToMap(node_def, op_node_name_list);
+    ret = AddFmkNodeDefToMap(node_def, op_node_name_list);
     GE_CHK_STATUS_EXEC(ret, return PARAM_INVALID, "add node_def to map failed");
   }
   PARSER_TIMESTAMP_END(AddFmkNodeDefToMap, "TensorFlowModelParser::AddFmkNodeDefToMap");
@@ -3140,8 +3135,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
   output_graph_def->Clear();
   for (const NodeDef &node : filtered_graph_def.node()) {
     if (input_nodes.count(node.name())) {
-      NodeDef placeholder_node;
-      placeholder_node = node;
+      NodeDef placeholder_node = node;
       placeholder_node.clear_input();
       GE_IF_BOOL_EXEC(node.op() != "Placeholder", placeholder_node.set_op("Placeholder"));
       domi::tensorflow::AttrValue attr_value;
@@ -3214,8 +3208,7 @@ Status TensorFlowModelParser::TrimGraphByOutput(const domi::tensorflow::GraphDef
   output_graph_def->Clear();
   for (const NodeDef &node : filtered_graph_def.node()) {
     if (input_nodes.count(node.name())) {
-      NodeDef placeholder_node;
-      placeholder_node = node;
+      NodeDef placeholder_node = node;
       placeholder_node.clear_input();
       GE_IF_BOOL_EXEC(node.op() != "Placeholder", placeholder_node.set_op("Placeholder"));
       domi::tensorflow::AttrValue attr_value;
@@ -3737,8 +3730,8 @@ void TensorFlowModelParser::UpdateInnerInputMap(const string &fusion_op_name, Op
   std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_input_map;
   for (auto iter = op_node_context.input_map.begin(); iter != op_node_context.input_map.end();) {
     string src_name = iter->first;
-    std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
     if (src_name == ge::kInputFromFusionScope) {
+      std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
       for (const auto &in_pair : input_idx) {
         if (in_pair.second != kControlSlot) {
           auto data = remap_data_input[fusion_op_name + std::to_string(in_pair.first)];
@@ -3784,8 +3777,8 @@ void TensorFlowModelParser::UpdateInnerOutputMap(const string &fusion_op_name, O
   std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_output_map;
   for (auto iter = op_node_context.output_map.begin(); iter != op_node_context.output_map.end();) {
     string dst_name = iter->first;
-    std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
     if (dst_name == ge::kOutputToFusionScope) {
+      std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
       for (const auto &out_pair : output_idx) {
         if (out_pair.second != kControlSlot) {
           auto data_outputs = remap_data_output[fusion_op_name + std::to_string(out_pair.second)];
diff --git a/parser/tensorflow/tensorflow_reshape_parser.cc b/parser/tensorflow/tensorflow_reshape_parser.cc
index a90ae5e..a0df1f6 100644
--- a/parser/tensorflow/tensorflow_reshape_parser.cc
+++ b/parser/tensorflow/tensorflow_reshape_parser.cc
@@ -33,9 +33,6 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
   GE_CHK_BOOL_RET_STATUS(TensorFlowUtil::ParseFromAttrValueList(ge_desc, a_list, 0, tf_datatype), PARAM_INVALID,
                          "parse ge_desc failed.");
   uint32_t size_type = 1;
-  int64_t real_size = 1;
-  int64_t tmp_dim = 0;
-
   auto data_type = ge_desc.GetDataType();
   bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
   GE_IF_BOOL_EXEC(!type_ret,
@@ -45,6 +42,8 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                       ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
                   return PARAM_INVALID);
   // calculate size
+  int64_t tmp_dim = 0;
+  int64_t real_size = 1;
   for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
     tmp_dim = ge_desc.GetShape().GetDim(j);
     GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
diff --git a/parser/tensorflow/tensorflow_squeeze_parser.cc b/parser/tensorflow/tensorflow_squeeze_parser.cc
index 27bb075..4d1601e 100644
--- a/parser/tensorflow/tensorflow_squeeze_parser.cc
+++ b/parser/tensorflow/tensorflow_squeeze_parser.cc
@@ -37,9 +37,6 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
   GE_CHK_BOOL_RET_STATUS(TensorFlowUtil::ParseFromAttrValueList(ge_desc, a_list, 0, tf_datatype), domi::PARAM_INVALID,
                          "parse ge_desc failed.");
   uint32_t size_type;
-  int64_t real_size = 1;
-  int64_t tmp_dim = 0;
-
   auto data_type = ge_desc.GetDataType();
   bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
   GE_IF_BOOL_EXEC(!type_ret,
@@ -49,6 +46,8 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
                       ge::TypeUtils::DataTypeToSerialString(data_type).c_str());
                   return domi::PARAM_INVALID);
   // calculate size
+  int64_t real_size = 1;
+  int64_t tmp_dim = 0;
   for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
     tmp_dim = ge_desc.GetShape().GetDim(j);
     GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
diff --git a/parser/tensorflow/tensorflow_util.cc b/parser/tensorflow/tensorflow_util.cc
index d3df7b0..697881c 100644
--- a/parser/tensorflow/tensorflow_util.cc
+++ b/parser/tensorflow/tensorflow_util.cc
@@ -267,11 +267,11 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
     GE_CHK_BOOL_RET_STATUS(ParseFromAttrValueList(ge_desc, a_list, i, tf_datatype), PARAM_INVALID,
                            "parse ge_desc failed.");
     uint32_t size_type = 1;
-    int64_t tmp_dim = 0;
     auto data_type = ge_desc.GetDataType();
     GE_CHK_BOOL_RET_STATUS(ge::TypeUtils::GetDataTypeLength(data_type, size_type), PARAM_INVALID,
                            "dataType no define size , parse ge_desc failed.");
     // get size
+    int64_t tmp_dim = 0;
     for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
       tmp_dim = ge_desc.GetShape().GetDim(j);