Compare commits

...

11 Commits

Author SHA1 Message Date
  王涛 87f695e2cf !386 Revert code 3 years ago
  zhao-lupeng 7c00192dcb backspace 3 years ago
  王涛 15a3bd5da1 !381 clear sc 3 years ago
  王涛 0e2afbfc26 !377 c79 Clear opensource warnings 3 years ago
  zhao-lupeng 5dc79a9b7d fix opensource 3 years ago
  zhao-lupeng 50edd7aa2f fix opensource 3 years ago
  zhao-lupeng 1e68fdfd3a fix opensource 3 years ago
  刘康 f58302c3aa clear sc 3 years ago
  zhao-lupeng c110be6445 fix opensource 3 years ago
  王涛 927c50a94b update OWNERS. 3 years ago
  王涛 de84350c26 update .gitmodules. 3 years ago
17 changed files with 44 additions and 59 deletions
  1. .gitmodules (+1 / -1)
  2. OWNERS (+1 / -1)
  3. parser/caffe/caffe_custom_parser_adapter.cc (+1 / -1)
  4. parser/caffe/caffe_parser.cc (+18 / -19)
  5. parser/common/acl_graph_parser_util.h (+0 / -2)
  6. parser/common/op_map.h (+0 / -2)
  7. parser/common/op_parser_factory.h (+1 / -1)
  8. parser/common/parser_fp16_t.cc (+2 / -4)
  9. parser/common/parser_utils.cc (+1 / -1)
  10. parser/onnx/onnx_parser.cc (+2 / -2)
  11. parser/onnx/subgraph_adapter/subgraph_adapter_factory.h (+1 / -1)
  12. parser/tensorflow/graph_optimizer.cc (+2 / -4)
  13. parser/tensorflow/graph_optimizer.h (+1 / -1)
  14. parser/tensorflow/tensorflow_parser.cc (+10 / -13)
  15. parser/tensorflow/tensorflow_reshape_parser.cc (+1 / -2)
  16. parser/tensorflow/tensorflow_squeeze_parser.cc (+1 / -2)
  17. parser/tensorflow/tensorflow_util.cc (+1 / -2)

.gitmodules (+1 / -1)

@@ -1,4 +1,4 @@
[submodule "metadef"]
path = metadef
url = https://gitee.com/ascend/metadef.git
branch = master
branch = r1.6.0

OWNERS (+1 / -1)

@@ -1,7 +1,7 @@
approvers:
- ji_chen
- wqtshg
- ljl0711
- liyihan123
reviewers:
- xchu42
- sheng-nan

parser/caffe/caffe_custom_parser_adapter.cc (+1 / -1)

@@ -85,10 +85,10 @@ Status CaffeCustomParserAdapter::ParseWeights(const Message *op_src, ge::NodePtr
return SUCCESS;
}

bool bias_en = false;
bool update_in_turn = (static_cast<int64_t >(op->GetAllInputsSize()) == (layer->bottom_size() + layer->blobs_size()));
int start_pos = layer->bottom_size();
for (int i = 0; i < layer->blobs_size(); ++i) {
bool bias_en = false;
ge::GeTensorPtr weight = ge::parser::MakeShared<ge::GeTensor>();
GE_CHECK_NOTNULL(weight);
GE_CHK_STATUS_RET(ConvertWeight(layer->blobs(i), layer->name(), weight),


parser/caffe/caffe_parser.cc (+18 / -19)

@@ -857,8 +857,7 @@ Status CaffeModelParser::AddNode(const domi::caffe::LayerParameter &layer, ge::C
// AddConstInput is a function defined in caffe_op_parser, override in caffe_reshape_parser.
std::shared_ptr<CaffeOpParser> caffe_op_parser = std::static_pointer_cast<CaffeOpParser>(op_parser);
GE_CHECK_NOTNULL(caffe_op_parser);
Status status;
status = caffe_op_parser->AddConstInput(node);
Status status = caffe_op_parser->AddConstInput(node);
if (status != SUCCESS) {
REPORT_CALL_ERROR("E19999", "AddConstInput failed for node:%s", node->GetOpDesc()->GetName().c_str());
GELOGE(FAILED, "[Add][ConstInput] to node %s fail.", node->GetOpDesc()->GetName().c_str());
@@ -938,7 +937,7 @@ Status CaffeModelParser::AddTensorDescToOpDescByIr(ge::OpDescPtr &op_desc, const
for (int i = 0; i < valid_input_size; i++) {
ge::GeTensorDesc input_tensor;
std::string input_name;
ge::graphStatus ret = ge::GRAPH_SUCCESS;
ge::graphStatus ret;
// Below cases are supported fow now when there are optional inputs
// x means optional, o means requierd input
// a. ooxxx, number of o and x>=layer.bottom_size+layer.blobs_size>=number of o
@@ -1097,8 +1096,8 @@ Status CaffeModelParser::AddUserOutNodesTop() {
}

Status CaffeModelParser::AddOutputTop(const domi::caffe::NetParameter &proto_message) {
for (int32_t i = 0; i < proto_message.layer_size(); i++) {
const domi::caffe::LayerParameter &layer = proto_message.layer(i);
for (int32_t j = 0; j < proto_message.layer_size(); j++) {
const domi::caffe::LayerParameter &layer = proto_message.layer(j);

if (!CheckValidLayer(layer)) {
continue;
@@ -1296,8 +1295,8 @@ Status CaffeModelParser::ParseFromMemory(const char *data, uint32_t size, ge::Co

// parse ParamSpec
std::vector<string> v_param_names;
for (int i = 0; i < layer.param_size(); i++) {
const domi::caffe::ParamSpec &param = layer.param(i);
for (int j = 0; j < layer.param_size(); j++) {
const domi::caffe::ParamSpec &param = layer.param(j);
GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
}

@@ -1515,8 +1514,8 @@ Status CaffeModelParser::Parse(const char *model_path, ge::ComputeGraphPtr &grap

// parse ParamSpec
std::vector<string> v_param_names;
for (int i = 0; i < layer.param_size(); i++) {
const domi::caffe::ParamSpec &param = layer.param(i);
for (int j = 0; j < layer.param_size(); j++) {
const domi::caffe::ParamSpec &param = layer.param(j);
GE_IF_BOOL_EXEC((param.has_name()), v_param_names.emplace_back(param.name()));
}

@@ -2095,17 +2094,17 @@ Status CaffeWeightsParser::ConvertLayerParameter(const google::protobuf::Message
ge::ComputeGraphPtr &graph) {
vector<string> need_share_layers;
const domi::caffe::LayerParameter *layer = reinterpret_cast<const domi::caffe::LayerParameter *>(layer_message);
const string &layer_name = layer->name();
const string &share_layer_name = layer->name();
const string &layer_type = layer->type();
for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
GELOGI("layer:%s need share weights !", layer_name.c_str());
if (find(p_iter->second.begin(), p_iter->second.end(), share_layer_name) != p_iter->second.end()) {
GELOGI("layer:%s need share weights !", share_layer_name.c_str());
need_share_layers = p_iter->second;
}
}

if (need_share_layers.size() == 0) {
need_share_layers.push_back(layer_name);
need_share_layers.push_back(share_layer_name);
}

for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
@@ -2211,27 +2210,27 @@ Status CaffeWeightsParser::ConvertNetParameter(const NetParameter &param, ge::Co

for (int i = 0; i < num_layer; ++i) {
const LayerParameter &layer = param.layer(i);
const string &layer_name = layer.name();
const string &share_layer_name = layer.name();

// Skip some layer types
if (skiped_layer_type_.find(layer.type()) != skiped_layer_type_.end()) {
GELOGI("Skip layer %s", layer_name.c_str());
GELOGI("Skip layer %s", share_layer_name.c_str());
continue;
}

GELOGI("Parse layer %s", layer_name.c_str());
GELOGI("Parse layer %s", share_layer_name.c_str());

vector<string> need_share_layers;

for (auto p_iter = params_share_map.begin(); p_iter != params_share_map.end(); ++p_iter) {
if (find(p_iter->second.begin(), p_iter->second.end(), layer_name) != p_iter->second.end()) {
GELOGI("Layer: %s need share weights !", layer_name.c_str());
if (find(p_iter->second.begin(), p_iter->second.end(), share_layer_name) != p_iter->second.end()) {
GELOGI("Layer: %s need share weights !", share_layer_name.c_str());
need_share_layers = p_iter->second;
}
}

if (need_share_layers.size() == 0) {
need_share_layers.push_back(layer_name);
need_share_layers.push_back(share_layer_name);
}

for (auto share_iter = need_share_layers.begin(); share_iter != need_share_layers.end(); ++share_iter) {
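
The i-to-j renames in the caffe_parser.cc hunks above (AddOutputTop, ParseFromMemory, Parse) give inner loop counters their own names; the usual reason for such a rename is that an identically named counter already exists in an enclosing loop, which compilers (-Wshadow) and static-check tools report as shadowing. A minimal standalone sketch of that situation with hypothetical data, not code from the parser:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Two nested collections standing in for layers and their params.
      const std::vector<std::vector<int>> layers = {{1, 2}, {3, 4, 5}};

      for (std::size_t i = 0; i < layers.size(); ++i) {
        // Naming this counter `i` as well would shadow the outer `i`;
        // the diff above renames the inner counter to `j` instead.
        for (std::size_t j = 0; j < layers[i].size(); ++j) {
          std::printf("layer %zu param %zu -> %d\n", i, j, layers[i][j]);
        }
      }
      return 0;
    }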


parser/common/acl_graph_parser_util.h (+0 / -2)

@@ -230,8 +230,6 @@ inline domi::Status CheckInt64Uint32MulOverflow(int64_t a, uint32_t b) {
} // namespace parser
} // namespace ge

/*lint --emacro((773),GE_TIMESTAMP_START)*/
/*lint -esym(773,GE_TIMESTAMP_START)*/
#define PARSER_TIMESTAMP_START(stage) uint64_t startUsec_##stage = ge::parser::GetCurrentTimestamp()

#define PARSER_TIMESTAMP_END(stage, stage_name) \


parser/common/op_map.h (+0 / -2)

@@ -21,7 +21,6 @@
#include <string>
#include <vector>

/*lint -e1073*/
namespace ge {
// the operator type mapping table of caffe and mindspore
extern std::map<std::string, std::string> caffe_op_map;
@@ -41,5 +40,4 @@ extern std::vector<std::string> is_dataset_op_vec;
// output tensor num
extern std::map<std::string, int32_t> op_output_tensor_num;
} // namespace ge
/*lint +e1073*/
#endif // GE_COMMON_OP_MAP_H_

parser/common/op_parser_factory.h (+1 / -1)

@@ -101,7 +101,7 @@ class OpParserFactory {
* @ingroup domi_omg
* @brief Each Op corresponds to a Creator function
*/
std::map<std::string, CREATOR_FUN> op_parser_creator_map_; // lint !e1073
std::map<std::string, CREATOR_FUN> op_parser_creator_map_;
std::map<std::string, CREATOR_FUN> fusion_op_parser_creator_map_;

friend class OpParserRegisterar;


parser/common/parser_fp16_t.cc (+2 / -4)

@@ -675,8 +675,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
uint64_t m_tmp;
if (e_a > e_b) {
m_tmp = m_a;
uint16_t tmp;
tmp = e_a - e_b;
uint16_t tmp = e_a - e_b;
for (int i = 0; i < tmp; i++) {
m_tmp = m_tmp << 1;
}
@@ -690,8 +689,7 @@ static uint16_t Fp16Div(uint16_t v_1, uint16_t v_2) {
m_b = m_tmp;
}
m_div = static_cast<float>(m_a * 1.0f / m_b);
fp16_t fp_div;
fp_div = m_div;
fp16_t fp_div = m_div;
ret = fp_div.val;
if (s_a != s_b) {
ret |= kFp16SignMask;
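
Both parser_fp16_t.cc hunks above collapse a declaration followed by a separate assignment into a single initialized declaration. A minimal sketch of the before/after shape, with made-up exponent values; the fp16 type itself is not reproduced here:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint16_t e_a = 24;
      const uint16_t e_b = 17;

      // Before the change the variable was declared first and assigned later:
      //   uint16_t tmp;
      //   tmp = e_a - e_b;
      // After the change it is declared at the point of use with its value:
      uint16_t tmp = e_a - e_b;

      std::printf("exponent delta: %u\n", static_cast<unsigned int>(tmp));
      return 0;
    }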


parser/common/parser_utils.cc (+1 / -1)

@@ -212,7 +212,7 @@ Status ParserUtils::HandleInputContext(const NodePtr &node,
// add control edge
if (node->GetInControlAnchor() != nullptr) {
for (const auto &out_anchor : node->GetInControlAnchor()->GetPeerAnchors()) {
graphStatus ret = GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor());
ret = GraphUtils::AddEdge(out_anchor, peer_in_anchor->GetOwnerNode()->GetInControlAnchor());
if (ret != GRAPH_SUCCESS) {
REPORT_CALL_ERROR("E19999", "add control edge from %s to %s failed.",
out_anchor->GetOwnerNode()->GetName().c_str(),


parser/onnx/onnx_parser.cc (+2 / -2)

@@ -767,8 +767,8 @@ Status OnnxModelParser::AdaptAndFindAllOnnxGraph(ge::onnx::GraphProto &root_onnx
return FAILED;
}

for (const auto &onnx_graph : onnx_graphs) {
onnx_graph_tasks.push(onnx_graph);
for (const auto &onnx_graph_task : onnx_graphs) {
onnx_graph_tasks.push(onnx_graph_task);
}
for (const auto &itr : name_to_onnx_subgraph) {
name_to_onnx_graph.emplace(itr.first, itr.second);


parser/onnx/subgraph_adapter/subgraph_adapter_factory.h (+1 / -1)

@@ -77,7 +77,7 @@ protected:
void RegisterCreator(const std::string &type, CREATOR_FUN fun);

private:
std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_; // lint !e1073
std::map<std::string, CREATOR_FUN> subgraph_adapter_creator_map_;

friend class SubgraphAdapterRegisterar;
};


parser/tensorflow/graph_optimizer.cc (+2 / -4)

@@ -858,7 +858,6 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis
// Set size
for (auto ge_desc : opDesc->GetAllOutputsDescPtr()) {
int64_t real_size = 1;
int64_t tmp_dim = 0;
auto data_type = ge_desc->GetDataType();

uint32_t size_type = 1;
@@ -873,7 +872,7 @@ Status CreateNodeDefBytes(ge::NodePtr n, string originalType, map<string, PIOLis

// calculate size
for (uint32_t j = 0; j < ge_desc->GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc->GetShape().GetDim(j);
int64_t tmp_dim = ge_desc->GetShape().GetDim(j);
GE_CHECK_GE(tmp_dim, 0);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;
@@ -1279,8 +1278,7 @@ Status CreateFuncDefBytes(ge::NodePtr n, string original_type, string func_bin_p

GELOGI("len =%d\n", len);

ge::GeAttrValue::BYTES funcDefBytes;
funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
ge::GeAttrValue::BYTES funcDefBytes = ge::Buffer::CopyFrom((std::uint8_t *)buf, len);
(void)ge::AttrUtils::SetBytes(opDesc, ge::ATTR_NAME_FRAMEWORK_FUNC_DEF, funcDefBytes);
GELOGI("funcDefBytes.GetSize() =%zu", funcDefBytes.GetSize());



parser/tensorflow/graph_optimizer.h (+1 / -1)

@@ -55,7 +55,7 @@ class ParserGraphOptimizer {

const bool GetLocalFmkopFlag() const { return local_fmk_op_flag_; }

void SetFuncBinPath(std::string isFuncBinPath) { func_bin_path_ = isFuncBinPath; }
void SetFuncBinPath(const std::string &isFuncBinPath) { func_bin_path_ = isFuncBinPath; }
const std::string GetFuncBinPath() const { return func_bin_path_; }

domi::Status InsertHWCK2FZ(ge::OutDataAnchorPtr src_anchor, ge::InDataAnchorPtr dst_anchor,
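
The SetFuncBinPath change above switches the parameter from a by-value std::string to a const reference, so the argument is copied once into the member instead of twice. A minimal sketch of the same signature on a hypothetical class; the path value is made up:

    #include <iostream>
    #include <string>

    class FuncBinPathHolder {  // hypothetical stand-in for ParserGraphOptimizer
     public:
      // const reference: no temporary copy of the caller's string is made;
      // the only copy happens in the assignment to the member below.
      void SetFuncBinPath(const std::string &func_bin_path) { func_bin_path_ = func_bin_path; }
      const std::string GetFuncBinPath() const { return func_bin_path_; }

     private:
      std::string func_bin_path_;
    };

    int main() {
      FuncBinPathHolder holder;
      const std::string path = "/tmp/example_func_bin";  // hypothetical path
      holder.SetFuncBinPath(path);
      std::cout << holder.GetFuncBinPath() << std::endl;
      return 0;
    }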


parser/tensorflow/tensorflow_parser.cc (+10 / -13)

@@ -1528,7 +1528,7 @@ Status TensorFlowModelParser::ParseAllGraph(const google::protobuf::Message *pro
if (tensorflow_op_map.find(node_op) == tensorflow_op_map.end()) {
GELOGW("%s not found in tensorflow_op_map.", node_op.c_str());
}
Status ret = AddNode(node_def, graph, scope_graph);
ret = AddNode(node_def, graph, scope_graph);
if (ret != SUCCESS) {
GELOGE(ret, "Add op[%s] failed", node_def->name().c_str());
DeleteFuisonNodeDef();
@@ -1688,7 +1688,6 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
}
}

int32_t tmp_index = 0;
auto find = tmp_input_node_name.find(":");
if (find == string::npos) {
*node_name = tmp_input_node_name;
@@ -1696,6 +1695,7 @@ Status TensorFlowModelParser::CheckInputNodeName(const string &input_node_name,
if (index == nullptr) {
return SUCCESS;
}
int32_t tmp_index = 0;
*index = tmp_index;

return SUCCESS;
@@ -2060,15 +2060,14 @@ Status TensorFlowModelParser::EraseNormalOpOutputIfChild(shared_ptr<ge::ScopeGra
for (auto iter = normal_op_node_context.output_map.begin(); iter != normal_op_node_context.output_map.end();) {
string output_node_name = iter->first;
ge::ScopeFusionOpInfo to_info;
int32_t from_index = 0;
int32_t to_index = 0;

if (IsFusionOpChild(output_node_name, &to_info) &&
nodedef_map_[output_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
// Fuse operator, update index
std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
for (auto &pair : pairs) {
from_index = pair.first;
int32_t from_index = pair.first;
int32_t to_index = 0;
GE_RETURN_WITH_LOG_IF_ERROR(GetInPutIndex(scope_graph, to_info, pair.second, to_index),
"GetInPutIndex failed ,output_node_name %s.", output_node_name.c_str());
tmp_output_map[to_info.fusion_node_name].push_back({from_index, to_index});
@@ -2097,15 +2096,14 @@ Status TensorFlowModelParser::UpdateNormalOpContext(shared_ptr<ge::ScopeGraph> &
for (auto iter = normal_op_node_context.input_map.begin(); iter != normal_op_node_context.input_map.end();) {
string input_node_name = iter->first;
ge::ScopeFusionOpInfo from_info;
int32_t from_index = 0;
int32_t to_index = 0;

if (IsFusionOpChild(input_node_name, &from_info) &&
nodedef_map_[input_node_name]->op() != TENSORFLOWF_NODE_OP_CONST) {
// Fuse operator, update index
std::vector<std::pair<int32_t, int32_t>> &pairs = iter->second;
for (auto &pair : pairs) {
to_index = pair.second;
int32_t from_index = 0;
int32_t to_index = pair.second;
GE_RETURN_WITH_LOG_IF_ERROR(GetOutPutIndex(scope_graph, from_info, pair.first, from_index),
"GetOutPutIndex failed ,input_node_name %s.", input_node_name.c_str());
tmp_input_map[from_info.fusion_node_name].push_back({from_index, to_index});
@@ -2325,7 +2323,7 @@ Status TensorFlowModelParser::ParseProto(const google::protobuf::Message *proto,
}

// Do not exit immediately when there is an error, wait until all errors are collected before exiting
Status ret = AddFmkNodeDefToMap(*graph_def, node_def, op_node_name_list);
ret = AddFmkNodeDefToMap(*graph_def, node_def, op_node_name_list);
GE_CHK_STATUS_EXEC(ret, return PARAM_INVALID, "add node_def to map failed");
}
PARSER_TIMESTAMP_END(AddFmkNodeDefToMap, "TensorFlowModelParser::AddFmkNodeDefToMap");
@@ -3180,8 +3178,7 @@ Status TensorFlowModelParser::TrimGraphByInput(const domi::tensorflow::GraphDef
output_graph_def->Clear();
for (const NodeDef &node : filtered_graph_def.node()) {
if (input_nodes.count(node.name())) {
NodeDef placeholder_node;
placeholder_node = node;
NodeDef placeholder_node = node;
placeholder_node.clear_input();
GE_IF_BOOL_EXEC(node.op() != "Placeholder", placeholder_node.set_op("Placeholder"));
domi::tensorflow::AttrValue attr_value;
@@ -3778,8 +3775,8 @@ void TensorFlowModelParser::UpdateInnerInputMap(const string &fusion_op_name, Op
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_input_map;
for (auto iter = op_node_context.input_map.begin(); iter != op_node_context.input_map.end();) {
string src_name = iter->first;
std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
if (src_name == ge::kInputFromFusionScope) {
std::vector<std::pair<int32_t, int32_t>> &input_idx = iter->second;
for (const auto &in_pair : input_idx) {
if (in_pair.second != kControlSlot) {
auto data = remap_data_input[fusion_op_name + std::to_string(in_pair.first)];
@@ -3825,8 +3822,8 @@ void TensorFlowModelParser::UpdateInnerOutputMap(const string &fusion_op_name, O
std::map<std::string, std::vector<std::pair<int32_t, int32_t>>> tmp_output_map;
for (auto iter = op_node_context.output_map.begin(); iter != op_node_context.output_map.end();) {
string dst_name = iter->first;
std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
if (dst_name == ge::kOutputToFusionScope) {
std::vector<std::pair<int32_t, int32_t>> &output_idx = iter->second;
for (const auto &out_pair : output_idx) {
if (out_pair.second != kControlSlot) {
auto data_outputs = remap_data_output[fusion_op_name + std::to_string(out_pair.second)];
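
A recurring pattern in the tensorflow_parser.cc hunks above: variables such as tmp_index, from_index, to_index, and input_idx/output_idx move from the top of the function or loop into the branch or iteration that actually uses them, and are initialized where they are declared. A minimal sketch of that scope-narrowing with hypothetical index pairs, not the parser's own data structures:

    #include <cstdint>
    #include <cstdio>
    #include <utility>
    #include <vector>

    int main() {
      const std::vector<std::pair<int32_t, int32_t>> pairs = {{0, 2}, {1, 3}};

      // Before: from_index/to_index were declared once above the loop and
      // reused across iterations. After (as in the diff): each iteration
      // declares its own copies exactly where they are needed.
      for (const auto &pair : pairs) {
        const int32_t from_index = pair.first;
        const int32_t to_index = pair.second;
        std::printf("remap %d -> %d\n", static_cast<int>(from_index), static_cast<int>(to_index));
      }
      return 0;
    }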


parser/tensorflow/tensorflow_reshape_parser.cc (+1 / -2)

@@ -34,7 +34,6 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
"parse ge_desc failed.");
uint32_t size_type = 1;
int64_t real_size = 1;
int64_t tmp_dim = 0;

auto data_type = ge_desc.GetDataType();
bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -46,7 +45,7 @@ Status TensorFlowReshapeParser::ParseDesc(const domi::tensorflow::AttrValue &att
return PARAM_INVALID);
// calculate size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
real_size *= tmp_dim;
}


parser/tensorflow/tensorflow_squeeze_parser.cc (+1 / -2)

@@ -38,7 +38,6 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
"parse ge_desc failed.");
uint32_t size_type;
int64_t real_size = 1;
int64_t tmp_dim = 0;

auto data_type = ge_desc.GetDataType();
bool type_ret = ge::TypeUtils::GetDataTypeLength(data_type, size_type);
@@ -50,7 +49,7 @@ Status TensorFlowSqueezeParser::ParseDesc(const domi::tensorflow::AttrValue &att
return domi::PARAM_INVALID);
// calculate size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);
GE_IF_BOOL_EXEC(tmp_dim < 0, real_size = tmp_dim * (-1) * real_size; continue;);
PARSER_INT64_MULCHECK(real_size, tmp_dim);
real_size *= tmp_dim;


parser/tensorflow/tensorflow_util.cc (+1 / -2)

@@ -179,14 +179,13 @@ FMK_FUNC_HOST_VISIBILITY FMK_FUNC_DEV_VISIBILITY domi::Status TensorFlowUtil::Tr
"parse ge_desc failed.");

uint32_t size_type = 1;
int64_t tmp_dim = 0;

auto data_type = ge_desc.GetDataType();
GE_CHK_BOOL_RET_STATUS(ge::TypeUtils::GetDataTypeLength(data_type, size_type), PARAM_INVALID,
"dataType no define size , parse ge_desc failed.");
// get size
for (uint32_t j = 0; j < ge_desc.GetShape().GetDimNum(); ++j) {
tmp_dim = ge_desc.GetShape().GetDim(j);
int64_t tmp_dim = ge_desc.GetShape().GetDim(j);

// The shape infered by fusedbatchnormgrad and mean calling tensorflow is not accurate.
// Here, special treatment is given to the two operators.
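
This hunk, like the reshape and squeeze ones above it, sits in a loop that accumulates a tensor's element count by multiplying the dims one by one; in the hunks that show it (graph_optimizer.cc, tensorflow_squeeze_parser.cc) each multiply is guarded by PARSER_INT64_MULCHECK. A minimal sketch of that kind of guarded accumulation, using a hand-written helper instead of the project's macro and a made-up shape:

    #include <cstdint>
    #include <cstdio>
    #include <limits>
    #include <vector>

    // Returns true if a * b would overflow int64_t (non-negative inputs assumed).
    static bool MulOverflows(int64_t a, int64_t b) {
      return a != 0 && b > std::numeric_limits<int64_t>::max() / a;
    }

    int main() {
      const std::vector<int64_t> dims = {8, 224, 224, 3};  // hypothetical shape
      int64_t real_size = 1;
      for (const int64_t dim : dims) {
        if (dim < 0) {
          continue;  // the real parser gives dynamic (negative) dims special handling
        }
        if (MulOverflows(real_size, dim)) {
          std::fprintf(stderr, "dim product overflows int64_t\n");
          return 1;
        }
        real_size *= dim;
      }
      std::printf("element count: %lld\n", static_cast<long long>(real_size));
      return 0;
    }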

