@@ -54,16 +54,16 @@ Status CaffeOpParser::ConvertWeight(const BlobProto &proto, const string &lay_na
   ConvertShape(proto, shape_vec);
   ge::GeShape shape(shape_vec);
   // Calculate the number of data in weight
-  int count = 1;
+  int32_t count = 1;
   for (size_t i = 0; i < shape.GetDimNum(); ++i) {
-    int dim = shape.GetDim(i);
+    int32_t dim = static_cast<int32_t>(shape.GetDim(i));
     if (dim <= 0) {
       REPORT_INNER_ERROR("E19999", "Convert weight fail, dim:%d of layer:%s <=0, check invalid", dim, lay_name.c_str());
       GELOGE(FAILED, "[Check][Size]Convert weight fail, dim:%d of layer:%s <=0, check invalid", dim, lay_name.c_str());
       return FAILED;
     }
-    if (dim >= INT64_MAX / count) {
+    if (dim >= INT32_MAX / count) {
       REPORT_INNER_ERROR("E19999", "Convert weight fail, shape:%s of layer:%s will overflow after multi",
                          shape.ToString().c_str(), lay_name.c_str());
GELOGE(FAILED, "[Check][Size]Convert weight fail, Blob size exceeds INT64_MAX, dim:%d, count:%d, layer name:%s", | |||
@@ -1101,14 +1101,14 @@ Status CaffeModelParser::AddUserOutNodesTop() {
 }
 Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_message) {
-  for (int32_t i = 0; i < proto_message.layer_size(); i++) {
-    const domi::caffe::LayerParameter &layer = proto_message.layer(i);
+  for (int32_t layer_index = 0; layer_index < proto_message.layer_size(); ++layer_index) {
+    const domi::caffe::LayerParameter &layer = proto_message.layer(layer_index);
     if (!CheckValidLayer(layer)) {
       continue;
     }
-    for (int i = 0; i < layer.top_size(); i++) {
+    for (int32_t i = 0; i < layer.top_size(); i++) {
       string top = layer.top(i);
       string top_origin = top;
       // Handling 'inplace' scenarios
@@ -1134,7 +1134,7 @@ Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_mes
       GELOGI("output in top_blob: %s", layer.name().c_str());
       if (top_node_iter != node_map.end()) {
         ge::GetParserContext().out_tensor_names.push_back(top_origin);
-        ge::GetParserContext().default_out_nodes.push_back(std::make_pair(layer.name(), (int32_t)i));
+        ge::GetParserContext().default_out_nodes.push_back(std::make_pair(layer.name(), i));
         GELOGI("The top of out node [%s] is [%s]", layer.name().c_str(), top_origin.c_str());
       }
     }
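These two hunks rename the outer layer counter so the inner top loop's `int i` no longer shadows it, which in turn lets the output pair take `i` directly with no `(int32_t)` cast. A minimal sketch of the shadowing hazard the rename removes, with illustrative names:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative only: with both loops named `i`, the inner declaration hides
// the outer one, so a reference meant for the layer index inside the inner
// loop silently binds to the top index instead.
void VisitTops(int32_t layer_count, int32_t top_count) {
  for (int32_t layer_index = 0; layer_index < layer_count; ++layer_index) {  // was: int32_t i
    for (int32_t i = 0; i < top_count; ++i) {  // inner i no longer shadows the outer counter
      std::printf("layer %d, top %d\n", layer_index, i);
    }
  }
}
```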
@@ -1261,8 +1261,8 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
   std::map<std::string, std::vector<std::string>> layer_params_map;
   // same param name set <paramnames,layernames>
   // std::map<std::vector<std::string>, std::vector<std::string>> params_share_map;
-  for (int32_t i = 0; i < layer_count; i++) {
-    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(i));
+  for (int32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
+    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(layer_index));
     GE_CHK_BOOL_EXEC_INFO(CheckValidLayer(layer), continue,
                           "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
@@ -1284,7 +1284,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
     // Times accumulation of duplicate operators
     layer_name_map[layer.name()]++;
     // Set the name in proto and layer
-    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(i);
+    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(layer_index);
     duplicate_name_layer->set_name(new_name); layer.set_name(new_name);)
     // Insert the new operator name, the number of times of duplicate name is recorded as 1
@@ -1300,7 +1300,7 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
+    for (int32_t i = 0; i < layer.param_size(); i++) {
       const domi::caffe::ParamSpec &param = layer.param(i);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
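The param loop moves from `int` to `int32_t` for consistency with the codebase's fixed-width style; protobuf's generated C++ repeated-field accessors (`param_size()`, `param(int)`) take and return `int`, which is 32 bits on the platforms this parser targets. A self-contained sketch, with `Layer` as a stand-in for the generated `domi::caffe::LayerParameter`:

```cpp
#include <cstdint>
#include <string>
#include <vector>

// `Layer` mimics the shape of a generated protobuf message; the real
// accessors use int, so an int32_t counter matches the API width here.
struct Layer {
  std::vector<std::string> params;
  int32_t param_size() const { return static_cast<int32_t>(params.size()); }
  const std::string &param(int32_t i) const { return params[i]; }
};

std::vector<std::string> CollectParamNames(const Layer &layer) {
  std::vector<std::string> names;
  for (int32_t i = 0; i < layer.param_size(); i++) {
    names.emplace_back(layer.param(i));
  }
  return names;
}
```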
@@ -1483,8 +1483,8 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
   // <layername,paramnames>
   std::map<std::string, std::vector<std::string>> layer_params_map;
   // same param name set <paramnames,layernames>
-  for (int32_t i = 0; i < layer_count; i++) {
-    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(i));
+  for (int32_t layer_index = 0; layer_index < layer_count; ++layer_index) {
+    domi::caffe::LayerParameter &layer = const_cast<domi::caffe::LayerParameter &>(proto_message.layer(layer_index));
     SaveOrigionLayerTops(layer);
     GE_CHK_BOOL_EXEC_INFO(CheckValidLayer(layer), continue,
                           "[Check][Layer]layer phase is train, skip this layer, name:%s, type:%s.",
@@ -1503,7 +1503,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
     // Times accumulation of duplicate operators
     layer_name_map[layer.name()]++;
     // Set the name in proto and layer
-    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(i);
+    domi::caffe::LayerParameter *duplicate_name_layer = proto_message.mutable_layer(layer_index);
     duplicate_name_layer->set_name(new_name); layer.set_name(new_name);)
     // Insert the new operator name, the number of times of duplicate name is recorded as 1
@@ -1519,7 +1519,7 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap
     // parse ParamSpec
     std::vector<string> v_param_names;
-    for (int i = 0; i < layer.param_size(); i++) {
+    for (int32_t i = 0; i < layer.param_size(); i++) {
       const domi::caffe::ParamSpec &param = layer.param(i);
       GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
     }
@@ -374,7 +374,7 @@ domi::Status AclGrphParseUtil::ParseAclEnableScope(const string &enable_scope_fu
 }
 void AclGrphParseUtil::AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec,
-                                             const string &fp16_nodes_name, uint32_t index, OpDescPtr &op_desc) {
+                                             const string &fp16_nodes_name, size_t index, OpDescPtr &op_desc) {
   if (AttrUtils::SetStr(op_desc, ATTR_ATC_USER_DEFINE_DATATYPE, TypeUtils::DataTypeToSerialString(DT_FLOAT16))) {
     if ((index < adjust_fp16_format_vec.size()) && (adjust_fp16_format_vec[index] == "true")) {
       GELOGI("This node [%s] should be set NC1HWC0", fp16_nodes_name.c_str());
@@ -405,7 +405,7 @@ domi::Status AclGrphParseUtil::ParseAclInputFp16Nodes(const ComputeGraphPtr &gra
   }
   GELOGI("The input_fp16_nodes is set %s", input_fp16_nodes.c_str());
   vector<string> input_fp16_nodes_vec = StringUtils::Split(input_fp16_nodes, ';');
-  for (uint32_t i = 0; i < input_fp16_nodes_vec.size(); ++i) {
+  for (size_t i = 0; i < input_fp16_nodes_vec.size(); ++i) {
     ge::NodePtr node = graph->FindNode(input_fp16_nodes_vec[i]);
     if (node == nullptr) {
       ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"},
@@ -494,12 +494,12 @@ domi::Status AclGrphParseUtil::GetDefaultOutInfo(ge::ComputeGraphPtr &compute_gr
                                                  std::vector<std::pair<ge::NodePtr, int32_t>> &output_nodes_info) {
   std::vector<std::pair<std::string, int32_t>> default_out_nodes = ge::GetParserContext().default_out_nodes;
   if (!default_out_nodes.empty()) {
-    for (uint32_t i = 0; i < default_out_nodes.size(); ++i) {
+    for (size_t i = 0; i < default_out_nodes.size(); ++i) {
       ge::NodePtr out_node = compute_graph->FindNode(default_out_nodes[i].first);
       if (out_node == nullptr) {
         ErrorManager::GetInstance().ATCReportErrMessage("E10016", {"parameter", "opname"},
                                                         {"out_nodes", default_out_nodes[i].first});
-        GELOGE(domi::FAILED, "[Check][Param] Can not find out_nodes(%d) (%s) in graph.",
+        GELOGE(domi::FAILED, "[Check][Param] Can not find out_nodes(%zu) (%s) in graph.",
               i, default_out_nodes[i].first.c_str());
         return domi::FAILED;
       }
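With the loop counter now `size_t`, the `%d` conversion in the log became a type mismatch, which is undefined behavior in printf-style formatting; `%zu` is the conversion that matches `size_t` on every platform. A tiny sketch with illustrative names:

```cpp
#include <cstdio>
#include <string>
#include <vector>

// %d paired with a size_t argument is UB in printf-style logging;
// %zu is the portable conversion for size_t.
void LogMissingOutNode(const std::vector<std::string> &out_nodes, size_t i) {
  std::printf("Can not find out_nodes(%zu) (%s) in graph.\n", i, out_nodes[i].c_str());
}
```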
@@ -57,7 +57,7 @@ class AclGrphParseUtil {
   domi::Status ParseAclOutputFp16NodesFormat(const std::string &is_output_fp16);
   domi::Status ParseAclEnableScope(const std::string &enable_scope_fusion_passes);
   static void AddAttrsForInputNodes(const vector<string> &adjust_fp16_format_vec, const string &fp16_nodes_name,
-                                    uint32_t index, OpDescPtr &op_desc);
+                                    size_t index, OpDescPtr &op_desc);
   domi::Status ParseAclInputFp16Nodes(const ComputeGraphPtr &graph, const string &input_fp16_nodes,
                                       const string &is_input_adjust_hw_layout);
   domi::Status GetDefaultOutInfo(ge::ComputeGraphPtr &compute_graph,
@@ -1818,17 +1818,20 @@ Status TensorFlowModelParser::GetInPutIndex(shared_ptr<ge::ScopeGraph> &scope_gr
     auto &impl = scope_graph->impl_;
     return impl->GetInputOrOutputIndex(info, old_index, true, new_index);
   }
-  return SUCCESS;
+  GELOGE(INTERNAL_ERROR, "Fusion op should come from scope fusion pass, node name:%s, fusion node name:%s",
+         info.node_name.c_str(), info.fusion_node_name.c_str());
+  return INTERNAL_ERROR;
 }
 Status TensorFlowModelParser::GetOutPutIndex(shared_ptr<ge::ScopeGraph> &scope_graph, const ge::ScopeFusionOpInfo &info,
                                              const int32_t old_index, int32_t &new_index) {
   GE_CHECK_NOTNULL(scope_graph);
-  Status ret;
   if (info.scope_pass) {
     auto &impl = scope_graph->impl_;
-    ret = impl->GetInputOrOutputIndex(info, old_index, false, new_index);
+    return impl->GetInputOrOutputIndex(info, old_index, false, new_index);
   }
-  return ret;
+  GELOGE(INTERNAL_ERROR, "Fusion op should come from scope fusion pass, node name:%s, fusion node name:%s",
+         info.node_name.c_str(), info.fusion_node_name.c_str());
+  return INTERNAL_ERROR;
 }
 bool TensorFlowModelParser::ConstOpNeedUpdate(const string &op_name) {
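Besides reporting an error instead of silently succeeding, this hunk fixes a latent bug in `GetOutPutIndex`: `Status ret;` was never initialized, so the `!info.scope_pass` path returned an indeterminate value. The new shape returns directly from the valid branch and fails loudly otherwise. A minimal sketch of that control flow, with `Status`/`SUCCESS`/`INTERNAL_ERROR` mirroring the codebase's conventions and the body standing in for `impl->GetInputOrOutputIndex(...)`:

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative types; the real Status/SUCCESS/INTERNAL_ERROR come from GE.
using Status = uint32_t;
constexpr Status SUCCESS = 0;
constexpr Status INTERNAL_ERROR = 1;

Status GetIndex(bool scope_pass, int32_t old_index, int32_t &new_index) {
  if (scope_pass) {
    new_index = old_index;  // stand-in for the scope-pass lookup
    return SUCCESS;         // valid branch returns its result directly
  }
  // no uninitialized `Status ret;` to fall through to: fail loudly instead
  std::fprintf(stderr, "Fusion op should come from scope fusion pass\n");
  return INTERNAL_ERROR;
}
```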
@@ -4135,7 +4135,7 @@ TEST_F(STestTensorflowParser, parser_UppdateInputMap_test)
   info.fusion_op_type = parser::FUSIONBATCHNORM;
   info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
   info.description = "";
-  info.scope_pass = false;
+  info.scope_pass = true;
   tensorflow_parser.nodedef_map_["dropout"] = node1;
   tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;
@@ -2743,7 +2743,7 @@ TEST_F(UtestTensorflowParser, Tensorflow_GetInOutPutIndex_scope_pass)
   input_node_info.scope_pass = false;
   ret = tensorflow_parser.GetInPutIndex(scope_graph, input_node_info, old_index, new_index);
-  EXPECT_EQ(domi::SUCCESS, ret);
+  EXPECT_EQ(INTERNAL_ERROR, ret);
   delete graph;
 }
@@ -4287,7 +4287,7 @@ TEST_F(UtestTensorflowParser, parser_UppdateInputMap_test)
   info.fusion_op_type = parser::FUSIONBATCHNORM;
   info.node_name = "conv_conv5/BatchNorm/batchnorm/add";
   info.description = "";
-  info.scope_pass = false;
+  info.scope_pass = true;
   tensorflow_parser.nodedef_map_["dropout"] = node1;
   tensorflow_parser.nodedef_map_["conv_conv5/BatchNorm/moving_variance"] = node2;