From 56c8279f0b74eeea495bde1ebf738b40abbd876e Mon Sep 17 00:00:00 2001
From: wangzhengjun
Date: Tue, 21 Dec 2021 16:43:42 +0800
Subject: [PATCH] code review

---
 parser/caffe/caffe_op_parser.cc             |  6 +++---
 parser/caffe/caffe_parser.cc                | 24 +++++++++++-----------
 parser/common/acl_graph_parser_util.cc      |  8 ++++----
 parser/common/acl_graph_parser_util.h       |  2 +-
 parser/tensorflow/tensorflow_parser.cc      | 11 ++++++----
 tests/st/testcase/test_tensorflow_parser.cc |  2 +-
 .../tensorflow_parser_unittest.cc           |  4 ++--
 7 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/parser/caffe/caffe_op_parser.cc b/parser/caffe/caffe_op_parser.cc
index 45089d2..2e0e33a 100644
--- a/parser/caffe/caffe_op_parser.cc
+++ b/parser/caffe/caffe_op_parser.cc
@@ -54,16 +54,16 @@ Status CaffeOpParser::ConvertWeight(const BlobProto &proto, const string &lay_na
   ConvertShape(proto, shape_vec);
   ge::GeShape shape(shape_vec);
   // Calculate the number of data in weight
-  int count = 1;
+  int32_t count = 1;
   for (size_t i = 0; i < shape.GetDimNum(); ++i) {
-    int dim = shape.GetDim(i);
+    int32_t dim = static_cast<int32_t>(shape.GetDim(i));
     if (dim <= 0) {
       REPORT_INNER_ERROR("E19999", "Convert weight fail, dim:%d of layer:%s <=0, check invalid", dim,
                          lay_name.c_str());
       GELOGE(FAILED, "[Check][Size]Convert weight fail, dim:%d of layer:%s <=0, check invalid", dim, lay_name.c_str());
       return FAILED;
     }
-    if (dim >= INT64_MAX / count) {
+    if (dim >= INT32_MAX / count) {
       REPORT_INNER_ERROR("E19999", "Convert weight fail, shape:%s of layer:%s will overflow after multi",
                          shape.ToString().c_str(), lay_name.c_str());
       GELOGE(FAILED, "[Check][Size]Convert weight fail, Blob size exceeds INT64_MAX, dim:%d, count:%d, layer name:%s",
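Review note: the hunk above narrows both the accumulator and the overflow bound from 64-bit to 32-bit, so the guard now actually protects the `int32_t` product. A minimal standalone sketch of the same pre-multiplication guard, assuming nothing from the parser codebase (the `CheckedDimProduct` helper is hypothetical):

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Multiply dims into an int32_t, rejecting non-positive dims and products
// that would exceed INT32_MAX. Checking `dim >= INT32_MAX / count` *before*
// multiplying avoids signed-integer overflow, which is undefined behavior.
std::optional<int32_t> CheckedDimProduct(const std::vector<int64_t> &dims) {
  int32_t count = 1;
  for (const int64_t raw_dim : dims) {
    // As in the patch, the narrowing cast assumes each dim fits in int32_t.
    const int32_t dim = static_cast<int32_t>(raw_dim);
    if (dim <= 0) {
      return std::nullopt;  // invalid dimension
    }
    if (dim >= INT32_MAX / count) {
      return std::nullopt;  // count * dim would overflow int32_t
    }
    count *= dim;
  }
  return count;
}
```

Like the patched check, this is slightly conservative: a product exactly equal to INT32_MAX is also rejected.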
diff --git a/parser/caffe/caffe_parser.cc b/parser/caffe/caffe_parser.cc
index 3c62f8b..5788edd 100644
--- a/parser/caffe/caffe_parser.cc
+++ b/parser/caffe/caffe_parser.cc
@@ -1101,14 +1101,14 @@ Status CaffeModelParser::AddUserOutNodesTop() {
 }
 
 Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_message) {
-  for (int32_t i = 0; i < proto_message.layer_size(); i++) {
-    const domi::caffe::LayerParameter &layer = proto_message.layer(i);
+  for (int32_t layer_index = 0; layer_index < proto_message.layer_size(); ++layer_index) {
+    const domi::caffe::LayerParameter &layer = proto_message.layer(layer_index);
 
     if (!CheckValidLayer(layer)) {
       continue;
     }
 
-    for (int i = 0; i < layer.top_size(); i++) {
+    for (int32_t i = 0; i < layer.top_size(); i++) {
       string top = layer.top(i);
       string top_origin = top;
       // Handling 'inplace' scenarios
@@ -1134,7 +1134,7 @@ Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_mes
       GELOGI("output in top_blob: %s", layer.name().c_str());
       if (top_node_iter != node_map.end()) {
         ge::GetParserContext().out_tensor_names.push_back(top_origin);
-        ge::GetParserContext().default_out_nodes.push_back(std::make_pair(layer.name(), (int32_t)i));
+        ge::GetParserContext().default_out_nodes.push_back(std::make_pair(layer.name(), i));
         GELOGI("The top of out node [%s] is [%s]", layer.name().c_str(), top_origin.c_str());
       }
     }
@@ -1261,8 +1261,8 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
 
   std::map<std::string, std::vector<std::string>> layer_params_map;
   // same param name set
   // std::map<std::vector<std::string>, std::vector<std::string>> params_share_map;
-  for (int32_t i = 0; i < layer_count; i++) {
-    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(i));
+  for (int32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
+    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(layer_index));
     GE_CHK_BOOL_EXEC_INFO(CheckValidLayer(layer), continue,
                           "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
@@ -1284,7 +1284,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
                     // Times accumulation of duplicate operators
                     layer_name_map[layer.name()]++;
                     // Set the name in proto and layer
-                    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(i);
+                    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(layer_index);
                     duplicate_name_layer->set_name(new_name);
                     layer.set_name(new_name);)
     // Insert the new operator name, the number of times of duplicate name is recorded as 1
@@ -1300,7 +1300,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
 
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
+    for (int32_t i = 0; i < layer.param_size(); i++) {
       const domi::caffe::ParamSpec &param = layer.param(i);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
@@ -1483,8 +1483,8 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
 
   // std::map<std::string, std::vector<std::string>> layer_params_map;
   // same param name set
-  for (int32_t i = 0; i < layer_count; i++) {
-    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(i));
+  for (int32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
+    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(layer_index));
     SaveOrigionLayerTops(layer);
     GE_CHK_BOOL_EXEC_INFO(CheckValidLayer(layer), continue,
                           "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
@@ -1503,7 +1503,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
                     // Times accumulation of duplicate operators
                     layer_name_map[layer.name()]++;
                     // Set the name in proto and layer
-                    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(i);
+                    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(layer_index);
                     duplicate_name_layer->set_name(new_name);
                     layer.set_name(new_name);)
     // Insert the new operator name, the number of times of duplicate name is recorded as 1
@@ -1519,7 +1519,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
 
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
+    for (int32_t i = 0; i < layer.param_size(); i++) {
      const domi::caffe::ParamSpec &param = layer.param(i);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
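Review note: renaming the outer counter to `layer_index` matters because the loop bodies above declare inner counters also named `i` (for tops and params); after the rename, references such as `proto_message.mutable_layer(layer_index)` can no longer silently bind to the wrong index. A contrived sketch of the hazard, unrelated to the parser types:

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const std::vector<std::vector<int32_t>> layers = {{10, 20}, {30}};
  // Before: outer and inner counters were both `i`, so inside the inner loop
  // the outer index was shadowed, and an expression meant to index layers
  // could compile while actually using the top index.
  for (int32_t layer_index = 0; layer_index < static_cast<int32_t>(layers.size()); ++layer_index) {
    for (int32_t i = 0; i < static_cast<int32_t>(layers[layer_index].size()); ++i) {
      // With distinct names, layers[layer_index] cannot be confused with
      // layers[i]; both would have compiled when the names collided.
      std::printf("layer %d, top %d -> %d\n", layer_index, i, layers[layer_index][i]);
    }
  }
  return 0;
}
```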
diff --git a/parser/common/acl_graph_parser_util.cc b/parser/common/acl_graph_parser_util.cc
index de38203..dd30d46 100644
--- a/parser/common/acl_graph_parser_util.cc
+++ b/parser/common/acl_graph_parser_util.cc
@@ -374,7 +374,7 @@ domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fu
 }
 
 void AclGrphParseUtil::AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec,
-                                             const string &fp16_nodes_name, uint32_t index, OpDescPtr &op_desc) {
+                                             const string &fp16_nodes_name, size_t index, OpDescPtr &op_desc) {
   if (AttrUtils::SetStr(op_desc, ATTR_ATC_USER_DEFINE_DATATYPE, TypeUtils::DataTypeToSerialString(DT_FLOAT16))) {
     if ((index < adjust_fp16_format_vec.size()) && (adjust_fp16_format_vec[index] == "true")) {
       GELOGI("This node [%s] should be set NC1HWC0", fp16_nodes_name.c_str());
@@ -405,7 +405,7 @@ domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &gra
   }
   GELOGI("The input_fp16_nodes is set %s", input_fp16_nodes.c_str());
   vector<string> input_fp16_nodes_vec = StringUtils::Split(input_fp16_nodes, ';');
-  for (uint32_t i = 0; i < input_fp16_nodes_vec.size(); ++i) {
+  for (size_t i = 0; i < input_fp16_nodes_vec.size(); ++i) {
     ge::NodePtr node = graph->FindNode(input_fp16_nodes_vec[i]);
     if (node == nullptr) {
       ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"},
@@ -494,12 +494,12 @@ domi::Status AclGrphParseUtil::GetDefaultOutInfo(ge::ComputeGraphPtr &compute_gr
                                                  std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) {
   std::vector<std::pair<std::string, int32_t>> default_out_nodes = ge::GetParserContext().default_out_nodes;
   if (!default_out_nodes.empty()) {
-    for (uint32_t i = 0; i < default_out_nodes.size(); ++i) {
+    for (size_t i = 0; i < default_out_nodes.size(); ++i) {
       ge::NodePtr out_node = compute_graph->FindNode(default_out_nodes[i].first);
       if (out_node == nullptr) {
         ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"},
                                                         {"out_nodes", default_out_nodes[i].first});
-        GELOGE(domi::FAILED, "[Check][Param] Can not find out_nodes(%d) (%s) in graph.",
+        GELOGE(domi::FAILED, "[Check][Param] Can not find out_nodes(%zu) (%s) in graph.",
                i, default_out_nodes[i].first.c_str());
         return domi::FAILED;
       }
diff --git a/parser/common/acl_graph_parser_util.h b/parser/common/acl_graph_parser_util.h
index a17f95b..c133915 100644
--- a/parser/common/acl_graph_parser_util.h
+++ b/parser/common/acl_graph_parser_util.h
@@ -57,7 +57,7 @@ class AclGrphParseUtil {
   domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16);
   domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes);
   static void AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec, const string &fp16_nodes_name,
-                                    uint32_t index, OpDescPtr &op_desc);
+                                    size_t index, OpDescPtr &op_desc);
   domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
                                       const string &is_input_adjust_hw_layout);
   domi::Status GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
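Review note: the `uint32_t` → `size_t` changes match the index type to what `std::vector::size()` returns, and the log specifier moves from `%d` to `%zu` to stay in sync with the new type. A quick standalone illustration with plain printf (the node names are made up):

```cpp
#include <cstdio>
#include <string>
#include <vector>

int main() {
  const std::vector<std::string> out_nodes = {"data", "conv1"};
  // size() returns size_t, so a size_t counter avoids an implicit narrowing
  // to uint32_t; %zu is the portable printf specifier for size_t, whereas
  // passing a 64-bit size_t for %d is undefined behavior in varargs.
  for (size_t i = 0; i < out_nodes.size(); ++i) {
    std::printf("out_nodes(%zu) (%s)\n", i, out_nodes[i].c_str());
  }
  return 0;
}
```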
diff --git a/parser/tensorflow/tensorflow_parser.cc b/parser/tensorflow/tensorflow_parser.cc
index 5afce4c..345226c 100644
--- a/parser/tensorflow/tensorflow_parser.cc
+++ b/parser/tensorflow/tensorflow_parser.cc
@@ -1818,17 +1818,20 @@ Status TensorFlowModelParser::GetInPutIndex(shared_ptr<ge::ScopeGraph> &scope_gr
     auto &impl = scope_graph->impl_;
     return impl->GetInputOrOutputIndex(info, old_index, true, new_index);
   }
-  return SUCCESS;
+  GELOGE(INTERNAL_ERROR, "Fusion op should come from scope fusion pass, node name:%s, fusion node name:%s",
+         info.node_name.c_str(), info.fusion_node_name.c_str());
+  return INTERNAL_ERROR;
 }
 
 Status TensorFlowModelParser::GetOutPutIndex(shared_ptr<ge::ScopeGraph> &scope_graph, const ge::ScopeFusionOpInfo &info,
                                              const int32_t old_index, int32_t &new_index) {
   GE_CHECK_NOTNULL(scope_graph);
-  Status ret;
   if (info.scope_pass) {
     auto &impl = scope_graph->impl_;
-    ret = impl->GetInputOrOutputIndex(info, old_index, false, new_index);
+    return impl->GetInputOrOutputIndex(info, old_index, false, new_index);
   }
-  return ret;
+  GELOGE(INTERNAL_ERROR, "Fusion op should come from scope fusion pass, node name:%s, fusion node name:%s",
+         info.node_name.c_str(), info.fusion_node_name.c_str());
+  return INTERNAL_ERROR;
 }
 
 bool TensorFlowModelParser::ConstOpNeedUpdate(const string &op_name) {
diff --git a/tests/st/testcase/test_tensorflow_parser.cc b/tests/st/testcase/test_tensorflow_parser.cc
index 47786a8..aa87e61 100644
--- a/tests/st/testcase/test_tensorflow_parser.cc
+++ b/tests/st/testcase/test_tensorflow_parser.cc
@@ -4135,7 +4135,7 @@ TEST_F(STestTensorflowParser, parser_UppdateInputMap_test)
   info.fusion_op_type = parser::FUSIONBATCHNORM;
   info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
   info.description = "";
-  info.scope_pass = false;
+  info.scope_pass = true;
 
   tensorflow_parser.nodedef_map_["dropout"] = node1;
   tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;
diff --git a/tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc b/tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc
index feff339..190cf5a 100644
--- a/tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc
+++ b/tests/ut/parser/testcase/tensorflow_parser_testcase/tensorflow_parser_unittest.cc
@@ -2743,7 +2743,7 @@ TEST_F(UtestTensorflowParser, Tensorflow_GetInOutPutIndex_scope_pass)
   input_node_info.scope_pass = false;
 
   ret = tensorflow_parser.GetInPutIndex(scope_graph, input_node_info, old_index, new_index);
-  EXPECT_EQ(domi::SUCCESS, ret);
+  EXPECT_EQ(INTERNAL_ERROR, ret);
 
   delete graph;
 }
@@ -4287,7 +4287,7 @@ TEST_F(UtestTensorflowParser, parser_UppdateInputMap_test)
   info.fusion_op_type = parser::FUSIONBATCHNORM;
   info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
   info.description = "";
-  info.scope_pass = false;
+  info.scope_pass = true;
 
   tensorflow_parser.nodedef_map_["dropout"] = node1;
   tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;
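Review note: `GetInPutIndex` used to report SUCCESS without computing an index when `info.scope_pass` was false, and `GetOutPutIndex` returned a never-assigned local `Status ret;`, an indeterminate value if Status is a plain integer typedef. Both paths now fail explicitly, which is why the ST and UT expectations above flip to `INTERNAL_ERROR` or set `scope_pass = true`. A reduced sketch of the new contract with stand-in types (the `INTERNAL_ERROR` value here is hypothetical):

```cpp
#include <cstdint>
#include <cstdio>

using Status = uint32_t;                   // stand-in for ge::Status
constexpr Status SUCCESS = 0;
constexpr Status INTERNAL_ERROR = 0x1010;  // hypothetical error value

struct ScopeFusionOpInfo {
  bool scope_pass;
};

// After the patch: either delegate to the scope-fusion implementation or
// fail loudly. Before, one variant returned SUCCESS with new_index untouched
// and the other returned an uninitialized local Status.
Status GetOutputIndexSketch(const ScopeFusionOpInfo &info, int32_t old_index, int32_t &new_index) {
  if (info.scope_pass) {
    new_index = old_index;  // stand-in for impl->GetInputOrOutputIndex(...)
    return SUCCESS;
  }
  std::fprintf(stderr, "Fusion op should come from scope fusion pass\n");
  return INTERNAL_ERROR;  // explicit, deterministic failure
}
```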